LKML Archive on lore.kernel.org
* [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel
@ 2021-08-18 14:10 sidraya.bj
  2021-08-18 14:10 ` [PATCH 01/30] dt-bindings: Add binding for img,d5500-vxd for DRA8x sidraya.bj
                   ` (31 more replies)
  0 siblings, 32 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This series of patches implements a V4L2-based decoder driver for the
H264, H265 and MJPEG decoding standards. The driver is for the D5520 H/W
decoder on the DRA8x SOC of the J721e platform.
The driver has been tested on the v5.14-rc6 kernel for the following
decoding standards with the v4l2-based GStreamer 1.16 plug-in:
1. H264
2. H265
3. MJPEG

Note:
The driver currently uses custom list, map and queue data structure APIs
and a custom IOMMU framework. We are working on replacing these custom
APIs with generic Linux kernel framework APIs.
In the meantime, we would like to address review comments from
reviewers before merging into the main media/platform subsystem.

Sidraya (30):
  dt-bindings: Add binding for img,d5500-vxd for DRA8x
  v4l: vxd-dec: Create mmu programming helper library
  v4l: vxd-dec: Create vxd_dec Mem Manager helper library
  v4l: vxd-dec: Add vxd helper library
  v4l: vxd-dec: Add IMG VXD Video Decoder mem to mem driver
  v4l: vxd-dec: Add hardware control modules
  v4l: vxd-dec: Add vxd core module
  v4l: vxd-dec: Add translation control modules
  v4l: vxd-dec: Add idgen api modules
  v4l: vxd-dec: Add utility modules
  v4l: vxd-dec: Add TALMMU module
  v4l: vxd-dec: Add VDEC MMU wrapper
  v4l: vxd-dec: Add Bitstream Preparser (BSPP) module
  v4l: vxd-dec: Add common headers
  v4l: vxd-dec: Add firmware interface headers
  v4l: vxd-dec: Add pool api modules
  v4l: vxd-dec: Add resource manager component
  v4l: vxd-dec: Add pixel processing library
  v4l: vxd-dec: Add vdecdd utility library
  v4l: vxd-dec: Add decoder resource component
  v4l: vxd-dec: Add decoder core component
  v4l: vxd-dec: Add vdecdd headers
  v4l: vxd-dec: Add decoder component
  v4l: vxd-dec: Add resource manager
  v4l: videodev2: Add 10bit definitions for NV12 and NV16 color formats
  media: Kconfig: Add Video decoder kconfig and Makefile entries
  media: platform: vxd: Kconfig: Add Video decoder Kconfig and Makefile
  IMG DEC V4L2 Interface function implementations
  arm64: dts: dra82: Add v4l2 vxd_dec device node
  ARM64: ti_sdk_arm64_release_defconfig: Enable d5520 video decoder
    driver

 .../bindings/media/img,d5520-vxd.yaml         |   52 +
 MAINTAINERS                                   |  114 +
 arch/arm64/boot/dts/ti/k3-j721e-main.dtsi     |    9 +
 .../configs/ti_sdk_arm64_release_defconfig    | 7407 +++++++++++++++++
 drivers/media/v4l2-core/v4l2-ioctl.c          |    2 +
 drivers/staging/media/Kconfig                 |    2 +
 drivers/staging/media/Makefile                |    1 +
 drivers/staging/media/vxd/common/addr_alloc.c |  499 ++
 drivers/staging/media/vxd/common/addr_alloc.h |  238 +
 drivers/staging/media/vxd/common/dq.c         |  248 +
 drivers/staging/media/vxd/common/dq.h         |   36 +
 drivers/staging/media/vxd/common/hash.c       |  481 ++
 drivers/staging/media/vxd/common/hash.h       |   86 +
 drivers/staging/media/vxd/common/idgen_api.c  |  449 +
 drivers/staging/media/vxd/common/idgen_api.h  |   59 +
 drivers/staging/media/vxd/common/img_errors.h |  104 +
 drivers/staging/media/vxd/common/img_mem.h    |   43 +
 .../staging/media/vxd/common/img_mem_man.c    | 1124 +++
 .../staging/media/vxd/common/img_mem_man.h    |  231 +
 .../media/vxd/common/img_mem_unified.c        |  276 +
 drivers/staging/media/vxd/common/imgmmu.c     |  782 ++
 drivers/staging/media/vxd/common/imgmmu.h     |  180 +
 drivers/staging/media/vxd/common/lst.c        |  119 +
 drivers/staging/media/vxd/common/lst.h        |   37 +
 drivers/staging/media/vxd/common/pool.c       |  228 +
 drivers/staging/media/vxd/common/pool.h       |   66 +
 drivers/staging/media/vxd/common/pool_api.c   |  709 ++
 drivers/staging/media/vxd/common/pool_api.h   |  113 +
 drivers/staging/media/vxd/common/ra.c         |  972 +++
 drivers/staging/media/vxd/common/ra.h         |  200 +
 drivers/staging/media/vxd/common/resource.c   |  576 ++
 drivers/staging/media/vxd/common/resource.h   |   66 +
 drivers/staging/media/vxd/common/rman_api.c   |  620 ++
 drivers/staging/media/vxd/common/rman_api.h   |   66 +
 drivers/staging/media/vxd/common/talmmu_api.c |  753 ++
 drivers/staging/media/vxd/common/talmmu_api.h |  246 +
 drivers/staging/media/vxd/common/vid_buf.h    |   42 +
 drivers/staging/media/vxd/common/work_queue.c |  188 +
 drivers/staging/media/vxd/common/work_queue.h |   66 +
 drivers/staging/media/vxd/decoder/Kconfig     |   13 +
 drivers/staging/media/vxd/decoder/Makefile    |  129 +
 drivers/staging/media/vxd/decoder/bspp.c      | 2479 ++++++
 drivers/staging/media/vxd/decoder/bspp.h      |  363 +
 drivers/staging/media/vxd/decoder/bspp_int.h  |  514 ++
 drivers/staging/media/vxd/decoder/core.c      | 3656 ++++++++
 drivers/staging/media/vxd/decoder/core.h      |   72 +
 .../staging/media/vxd/decoder/dec_resources.c |  554 ++
 .../staging/media/vxd/decoder/dec_resources.h |   46 +
 drivers/staging/media/vxd/decoder/decoder.c   | 4622 ++++++++++
 drivers/staging/media/vxd/decoder/decoder.h   |  375 +
 .../staging/media/vxd/decoder/fw_interface.h  |  818 ++
 drivers/staging/media/vxd/decoder/h264_idx.h  |   60 +
 .../media/vxd/decoder/h264_secure_parser.c    | 3051 +++++++
 .../media/vxd/decoder/h264_secure_parser.h    |  278 +
 drivers/staging/media/vxd/decoder/h264_vlc.h  |  604 ++
 .../staging/media/vxd/decoder/h264fw_data.h   |  652 ++
 .../media/vxd/decoder/h264fw_data_shared.h    |  759 ++
 .../media/vxd/decoder/hevc_secure_parser.c    | 2895 +++++++
 .../media/vxd/decoder/hevc_secure_parser.h    |  455 +
 .../staging/media/vxd/decoder/hevcfw_data.h   |  472 ++
 .../media/vxd/decoder/hevcfw_data_shared.h    |  767 ++
 .../staging/media/vxd/decoder/hw_control.c    | 1211 +++
 .../staging/media/vxd/decoder/hw_control.h    |  144 +
 .../media/vxd/decoder/img_dec_common.h        |  278 +
 .../media/vxd/decoder/img_msvdx_cmds.h        |  279 +
 .../media/vxd/decoder/img_msvdx_core_regs.h   |   22 +
 .../media/vxd/decoder/img_msvdx_vdmc_regs.h   |   26 +
 .../media/vxd/decoder/img_msvdx_vec_regs.h    |   60 +
 .../staging/media/vxd/decoder/img_pixfmts.h   |  195 +
 .../media/vxd/decoder/img_profiles_levels.h   |   33 +
 .../media/vxd/decoder/img_pvdec_core_regs.h   |   60 +
 .../media/vxd/decoder/img_pvdec_pixel_regs.h  |   35 +
 .../media/vxd/decoder/img_pvdec_test_regs.h   |   39 +
 .../media/vxd/decoder/img_vdec_fw_msg.h       |  192 +
 .../vxd/decoder/img_video_bus4_mmu_regs.h     |  120 +
 .../media/vxd/decoder/jpeg_secure_parser.c    |  645 ++
 .../media/vxd/decoder/jpeg_secure_parser.h    |   37 +
 .../staging/media/vxd/decoder/jpegfw_data.h   |   83 +
 .../media/vxd/decoder/jpegfw_data_shared.h    |   84 +
 drivers/staging/media/vxd/decoder/mem_io.h    |   42 +
 drivers/staging/media/vxd/decoder/mmu_defs.h  |   42 +
 drivers/staging/media/vxd/decoder/pixel_api.c |  895 ++
 drivers/staging/media/vxd/decoder/pixel_api.h |  152 +
 .../media/vxd/decoder/pvdec_entropy_regs.h    |   33 +
 drivers/staging/media/vxd/decoder/pvdec_int.h |   82 +
 .../media/vxd/decoder/pvdec_vec_be_regs.h     |   35 +
 drivers/staging/media/vxd/decoder/reg_io2.h   |   74 +
 .../staging/media/vxd/decoder/scaler_setup.h  |   59 +
 drivers/staging/media/vxd/decoder/swsr.c      | 1657 ++++
 drivers/staging/media/vxd/decoder/swsr.h      |  278 +
 .../media/vxd/decoder/translation_api.c       | 1725 ++++
 .../media/vxd/decoder/translation_api.h       |   42 +
 drivers/staging/media/vxd/decoder/vdec_defs.h |  548 ++
 .../media/vxd/decoder/vdec_mmu_wrapper.c      |  829 ++
 .../media/vxd/decoder/vdec_mmu_wrapper.h      |  174 +
 .../staging/media/vxd/decoder/vdecdd_defs.h   |  446 +
 .../staging/media/vxd/decoder/vdecdd_utils.c  |   95 +
 .../staging/media/vxd/decoder/vdecdd_utils.h  |   93 +
 .../media/vxd/decoder/vdecdd_utils_buf.c      |  897 ++
 .../staging/media/vxd/decoder/vdecfw_share.h  |   36 +
 .../staging/media/vxd/decoder/vdecfw_shared.h |  893 ++
 drivers/staging/media/vxd/decoder/vxd_core.c  | 1683 ++++
 drivers/staging/media/vxd/decoder/vxd_dec.c   |  185 +
 drivers/staging/media/vxd/decoder/vxd_dec.h   |  477 ++
 drivers/staging/media/vxd/decoder/vxd_ext.h   |   74 +
 drivers/staging/media/vxd/decoder/vxd_int.c   | 1137 +++
 drivers/staging/media/vxd/decoder/vxd_int.h   |  128 +
 .../staging/media/vxd/decoder/vxd_mmu_defs.h  |   30 +
 drivers/staging/media/vxd/decoder/vxd_props.h |   80 +
 drivers/staging/media/vxd/decoder/vxd_pvdec.c | 1745 ++++
 .../media/vxd/decoder/vxd_pvdec_priv.h        |  126 +
 .../media/vxd/decoder/vxd_pvdec_regs.h        |  779 ++
 drivers/staging/media/vxd/decoder/vxd_v4l2.c  | 2129 +++++
 include/uapi/linux/videodev2.h                |    2 +
 114 files changed, 62369 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
 create mode 100644 arch/arm64/configs/ti_sdk_arm64_release_defconfig
 create mode 100644 drivers/staging/media/vxd/common/addr_alloc.c
 create mode 100644 drivers/staging/media/vxd/common/addr_alloc.h
 create mode 100644 drivers/staging/media/vxd/common/dq.c
 create mode 100644 drivers/staging/media/vxd/common/dq.h
 create mode 100644 drivers/staging/media/vxd/common/hash.c
 create mode 100644 drivers/staging/media/vxd/common/hash.h
 create mode 100644 drivers/staging/media/vxd/common/idgen_api.c
 create mode 100644 drivers/staging/media/vxd/common/idgen_api.h
 create mode 100644 drivers/staging/media/vxd/common/img_errors.h
 create mode 100644 drivers/staging/media/vxd/common/img_mem.h
 create mode 100644 drivers/staging/media/vxd/common/img_mem_man.c
 create mode 100644 drivers/staging/media/vxd/common/img_mem_man.h
 create mode 100644 drivers/staging/media/vxd/common/img_mem_unified.c
 create mode 100644 drivers/staging/media/vxd/common/imgmmu.c
 create mode 100644 drivers/staging/media/vxd/common/imgmmu.h
 create mode 100644 drivers/staging/media/vxd/common/lst.c
 create mode 100644 drivers/staging/media/vxd/common/lst.h
 create mode 100644 drivers/staging/media/vxd/common/pool.c
 create mode 100644 drivers/staging/media/vxd/common/pool.h
 create mode 100644 drivers/staging/media/vxd/common/pool_api.c
 create mode 100644 drivers/staging/media/vxd/common/pool_api.h
 create mode 100644 drivers/staging/media/vxd/common/ra.c
 create mode 100644 drivers/staging/media/vxd/common/ra.h
 create mode 100644 drivers/staging/media/vxd/common/resource.c
 create mode 100644 drivers/staging/media/vxd/common/resource.h
 create mode 100644 drivers/staging/media/vxd/common/rman_api.c
 create mode 100644 drivers/staging/media/vxd/common/rman_api.h
 create mode 100644 drivers/staging/media/vxd/common/talmmu_api.c
 create mode 100644 drivers/staging/media/vxd/common/talmmu_api.h
 create mode 100644 drivers/staging/media/vxd/common/vid_buf.h
 create mode 100644 drivers/staging/media/vxd/common/work_queue.c
 create mode 100644 drivers/staging/media/vxd/common/work_queue.h
 create mode 100644 drivers/staging/media/vxd/decoder/Kconfig
 create mode 100644 drivers/staging/media/vxd/decoder/Makefile
 create mode 100644 drivers/staging/media/vxd/decoder/bspp.c
 create mode 100644 drivers/staging/media/vxd/decoder/bspp.h
 create mode 100644 drivers/staging/media/vxd/decoder/bspp_int.h
 create mode 100644 drivers/staging/media/vxd/decoder/core.c
 create mode 100644 drivers/staging/media/vxd/decoder/core.h
 create mode 100644 drivers/staging/media/vxd/decoder/dec_resources.c
 create mode 100644 drivers/staging/media/vxd/decoder/dec_resources.h
 create mode 100644 drivers/staging/media/vxd/decoder/decoder.c
 create mode 100644 drivers/staging/media/vxd/decoder/decoder.h
 create mode 100644 drivers/staging/media/vxd/decoder/fw_interface.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264_idx.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/h264_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264_vlc.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264fw_data.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264fw_data_shared.h
 create mode 100644 drivers/staging/media/vxd/decoder/hevc_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/hevc_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/hevcfw_data.h
 create mode 100644 drivers/staging/media/vxd/decoder/hevcfw_data_shared.h
 create mode 100644 drivers/staging/media/vxd/decoder/hw_control.c
 create mode 100644 drivers/staging/media/vxd/decoder/hw_control.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_dec_common.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_msvdx_cmds.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_msvdx_core_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_msvdx_vdmc_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_msvdx_vec_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_pixfmts.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_profiles_levels.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_pvdec_core_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_pvdec_pixel_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_pvdec_test_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_vdec_fw_msg.h
 create mode 100644 drivers/staging/media/vxd/decoder/img_video_bus4_mmu_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/jpeg_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/jpeg_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/jpegfw_data.h
 create mode 100644 drivers/staging/media/vxd/decoder/jpegfw_data_shared.h
 create mode 100644 drivers/staging/media/vxd/decoder/mem_io.h
 create mode 100644 drivers/staging/media/vxd/decoder/mmu_defs.h
 create mode 100644 drivers/staging/media/vxd/decoder/pixel_api.c
 create mode 100644 drivers/staging/media/vxd/decoder/pixel_api.h
 create mode 100644 drivers/staging/media/vxd/decoder/pvdec_entropy_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/pvdec_int.h
 create mode 100644 drivers/staging/media/vxd/decoder/pvdec_vec_be_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/reg_io2.h
 create mode 100644 drivers/staging/media/vxd/decoder/scaler_setup.h
 create mode 100644 drivers/staging/media/vxd/decoder/swsr.c
 create mode 100644 drivers/staging/media/vxd/decoder/swsr.h
 create mode 100644 drivers/staging/media/vxd/decoder/translation_api.c
 create mode 100644 drivers/staging/media/vxd/decoder/translation_api.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdec_defs.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
 create mode 100644 drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdecdd_defs.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdecdd_utils.c
 create mode 100644 drivers/staging/media/vxd/decoder/vdecdd_utils.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdecdd_utils_buf.c
 create mode 100644 drivers/staging/media/vxd/decoder/vdecfw_share.h
 create mode 100644 drivers/staging/media/vxd/decoder/vdecfw_shared.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_core.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_dec.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_dec.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_ext.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_int.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_int.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_mmu_defs.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_props.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_v4l2.c

-- 
2.17.1



* [PATCH 01/30] dt-bindings: Add binding for img,d5500-vxd for DRA8x
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 02/30] v4l: vxd-dec: Create mmu programming helper library sidraya.bj
                   ` (30 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

Add the dt-binding for the img,d5500-vxd node for DRA8x.

Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 .../bindings/media/img,d5520-vxd.yaml         | 52 +++++++++++++++++++
 MAINTAINERS                                   |  7 +++
 2 files changed, 59 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/media/img,d5520-vxd.yaml

diff --git a/Documentation/devicetree/bindings/media/img,d5520-vxd.yaml b/Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
new file mode 100644
index 000000000000..812a431336a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/img,d5520-vxd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Imagination D5520-VXD Driver
+
+maintainers:
+  - Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+  - Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+
+description: |
+ The IMG VXD video decode driver supports the D5500-VXD, a video decoder
+ for multiple video formats including H.264 and HEVC on the TI J721E
+ family of SoCs.
+
+properties:
+  compatible:
+    const: img,d5500-vxd
+
+  reg:
+    maxItems: 2
+
+  interrupts:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    d5520: video-decoder@4300000 {
+            /* IMG D5520 driver configuration */
+            compatible = "img,d5500-vxd";
+            reg = <0x00 0x04300000>,
+                  <0x00 0x100000>;
+            power-domains = <&k3_pds 144>;
+            interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+    };
+
+...
diff --git a/MAINTAINERS b/MAINTAINERS
index fd25e4ecf0b9..163b3176ccf9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19531,6 +19531,13 @@ W:	https://linuxtv.org
 T:	git git://linuxtv.org/media_tree.git
 F:	drivers/media/test-drivers/vicodec/*
 
+VIDEO DECODER DRIVER FOR TI DRA8XX/J721E
+M:	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+
 VIDEO I2C POLLING DRIVER
 M:	Matt Ranostay <matt.ranostay@konsulko.com>
 L:	linux-media@vger.kernel.org
-- 
2.17.1



* [PATCH 02/30] v4l: vxd-dec: Create mmu programming helper library
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
  2021-08-18 14:10 ` [PATCH 01/30] dt-bindings: Add binding for img,d5500-vxd for DRA8x sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:37   ` Greg KH
  2021-08-18 14:10 ` [PATCH 03/30] v4l: vxd-dec: Create vxd_dec Mem Manager " sidraya.bj
                   ` (29 subsequent siblings)
  31 siblings, 1 reply; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

The IMG D5520 has an MMU which needs to be programmed with all
memory which it needs access to. This includes input buffers,
output buffers and parameter buffers for each decode instance,
as well as common buffers for firmware, etc.

Functions are provided for creating MMU directories (each stream
will have its own MMU context), retrieving the directory page,
and mapping/unmapping a buffer into the MMU for a specific MMU context.

Helpers are also provided for querying the capabilities of the MMU.
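
As an illustration only (not part of this series), below is a minimal
sketch of how these helpers are intended to fit together. The
my_page_alloc()/my_page_free() allocator callbacks and
program_hw_dir_base() are hypothetical placeholders for caller-provided
code; the sketch assumes MMU_PAGE_SIZE == PAGE_SIZE:

	/* hedged usage sketch: per-stream directory, map, unmap, destroy */
	static int example_map_stream_buffer(struct scatterlist *sgl)
	{
		struct mmu_info info = {
			.pfn_page_alloc = my_page_alloc, /* caller-defined */
			.pfn_page_free  = my_page_free,  /* caller-defined */
			/* pfn_page_write NULL: built-in writer is used */
		};
		struct mmu_heap_alloc va = {
			.virt_addr  = 0x40000000, /* device virtual base */
			.alloc_size = 16 * MMU_PAGE_SIZE, /* page multiple */
		};
		struct mmu_directory *dir;
		struct mmu_map *map;

		dir = mmu_create_directory(&info);
		if (IS_ERR_VALUE((unsigned long)dir))
			return (int)(unsigned long)dir;

		/* the directory page is what the HW register points at */
		program_hw_dir_base(mmu_directory_get_page(dir)->phys_addr);

		map = mmu_directory_map_sg(dir, sgl, &va, 0x2 /* WO */);
		if (IS_ERR_VALUE((unsigned long)map)) {
			mmu_destroy_directory(dir);
			return (int)(unsigned long)map;
		}

		/* ... decoding happens here ... */

		mmu_directory_unmap(map);
		return mmu_destroy_directory(dir);
	}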

Signed-off-by: Buddy Liong <buddy.liong@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                               |   2 +
 drivers/staging/media/vxd/common/imgmmu.c | 782 ++++++++++++++++++++++
 drivers/staging/media/vxd/common/imgmmu.h | 180 +++++
 3 files changed, 964 insertions(+)
 create mode 100644 drivers/staging/media/vxd/common/imgmmu.c
 create mode 100644 drivers/staging/media/vxd/common/imgmmu.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 163b3176ccf9..2e921650a14c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19537,6 +19537,8 @@ M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+F:	drivers/staging/media/vxd/common/imgmmu.c
+F:	drivers/staging/media/vxd/common/imgmmu.h
 
 VIDEO I2C POLLING DRIVER
 M:	Matt Ranostay <matt.ranostay@konsulko.com>
diff --git a/drivers/staging/media/vxd/common/imgmmu.c b/drivers/staging/media/vxd/common/imgmmu.c
new file mode 100644
index 000000000000..ce2f41f72485
--- /dev/null
+++ b/drivers/staging/media/vxd/common/imgmmu.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC MMU function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include "img_mem_man.h"
+#include "imgmmu.h"
+
+/**
+ * struct mmu_directory - the MMU directory information
+ * @dir_page: pointer to the mmu_page_cfg (physical page) holding the
+ *	      directory entries
+ * @dir_page_table: all the page table structures in a static array of pointers
+ * @mmu_info_cfg: functions used to manage page allocation, freeing and
+ *		  writing
+ * @num_mapping: number of mappings using this directory
+ */
+struct mmu_directory {
+	struct mmu_page_cfg *dir_page;
+	struct mmu_page_cfg_table **dir_page_table;
+	struct mmu_info mmu_info_cfg;
+	unsigned int num_mapping;
+};
+
+/*
+ * struct mmu_map - the MMU mapping information
+ * @mmu_dir: pointer to the mmu_directory which this mmu_map belongs to
+ * @dev_virt_addr: device virtual address root associated with this mapping
+ * @used_flag: flag used when allocating
+ * @n_entries: number of entries mapped
+ */
+struct mmu_map {
+	struct mmu_directory *mmu_dir;
+	struct mmu_heap_alloc dev_virt_addr;
+	unsigned int used_flag;
+	unsigned int n_entries;
+};
+
+/*
+ * struct mmu_page_cfg_table - the MMU page table information.
+ *			       One page table of the directory.
+ * @mmu_dir: pointer to the mmu_directory which this mmu_page_cfg_table
+ *	     belongs to
+ * @page: page used to store this mapping in the MMU
+ * @valid_entries: number of valid entries in this page
+ */
+struct mmu_page_cfg_table {
+	struct mmu_directory *mmu_dir;
+	struct mmu_page_cfg *page;
+	unsigned int valid_entries;
+};
+
+/*
+ * mmu_pgt_destroy() - Destruction of a page table (does not follow the
+ *                     child pointer)
+ * @pgt: pointer to the MMU page table information
+ *
+ * Warning: Does not verify if pages are still valid or not
+ */
+static void mmu_pgt_destroy(struct mmu_page_cfg_table *pgt)
+{
+	if (!pgt->mmu_dir ||
+	    !pgt->mmu_dir->mmu_info_cfg.pfn_page_free ||
+	    !pgt->page) {
+		return;
+	}
+
+	pr_debug("%s:%d Destroy page table (phys addr %llu)\n",
+		 __func__, __LINE__, pgt->page->phys_addr);
+
+	pgt->mmu_dir->mmu_info_cfg.pfn_page_free(pgt->page);
+	pgt->page = NULL;
+
+	kfree(pgt);
+}
+
+/*
+ * mmu_dir_entry() - Extract the directory index from a virtual address
+ * @vaddr: virtual address
+ */
+static inline unsigned int mmu_dir_entry(unsigned long vaddr)
+{
+	return (unsigned int)((vaddr & VIRT_DIR_IDX_MASK) >> MMU_DIR_SHIFT);
+}
+
+/*
+ * mmu_pg_entry() - Extract the page table index from a virtual address
+ * @vaddr: virtual address
+ */
+static inline unsigned int mmu_pg_entry(unsigned long vaddr)
+{
+	return (unsigned int)((vaddr & VIRT_PAGE_TBL_MASK) >> MMU_PAGE_SHIFT);
+}
+
+/*
+ * mmu_pg_wr() - Default function used when a mmu_info structure has an empty
+ *		 pfn_page_write pointer
+ * @mmu_page: pointer to the mmu_page to update
+ * @offset: offset into the directory
+ * @pa_to_write: physical address value to add to the entry
+ * @mmu_flag: mmu flag(s) to set
+ */
+static void mmu_pg_wr(struct mmu_page_cfg *mmu_page, unsigned int offset,
+		      unsigned long long pa_to_write, unsigned int mmu_flag)
+{
+	unsigned int *dir_mem = NULL;
+	unsigned long long cur_pa = pa_to_write;
+
+	if (!mmu_page)
+		return;
+
+	dir_mem = (unsigned int *)mmu_page->cpu_virt_addr;
+	/*
+	 * assumes that the MMU HW has the extra-bits enabled (this default
+	 * function has no way of knowing)
+	 */
+	if ((MMU_PHYS_SIZE - MMU_VIRT_SIZE) > 0)
+		cur_pa >>= (MMU_PHYS_SIZE - MMU_VIRT_SIZE);
+	/*
+	 * The MMU_PAGE_SHIFT bottom bits need no masking because page
+	 * allocations are page-aligned.
+	 * The bottom MMU_PAGE_SHIFT-(MMU_PHYS_SIZE-MMU_VIRT_SIZE) bits are
+	 * used for flags, so this is fine.
+	 */
+	dir_mem[offset] = (unsigned int)cur_pa | (mmu_flag);
+}
+
+/*
+ * mmu_pgt_create() - Create a page table
+ * @mmu_dir: pointer to the mmu_directory in which to create the new page table
+ *	     structure
+ *
+ * Return: A pointer to the new page table structure in case of success, or
+ *	   an error value cast to (void *) in case of error
+ */
+static struct mmu_page_cfg_table *mmu_pgt_create(struct mmu_directory *mmu_dir)
+{
+	struct mmu_page_cfg_table *neo = NULL;
+	unsigned int i;
+
+	if (!mmu_dir || !mmu_dir->mmu_info_cfg.pfn_page_alloc ||
+	    !mmu_dir->mmu_info_cfg.pfn_page_write)
+		return (void *)(-EINVAL);
+
+	neo = kmalloc(sizeof(*neo), GFP_KERNEL);
+	if (!neo)
+		return (void *)(-ENOMEM);
+
+	neo->mmu_dir = mmu_dir;
+
+	neo->page =
+		mmu_dir->mmu_info_cfg.pfn_page_alloc(mmu_dir->mmu_info_cfg.alloc_ctx);
+	if (!neo->page) {
+		pr_err("%s:%d failed to allocate Page Table physical page\n",
+		       __func__, __LINE__);
+		kfree(neo);
+		return (void *)(-ENOMEM);
+	}
+	pr_debug("%s:%d Create page table (phys addr 0x%llx CPU Virt 0x%lx)\n",
+		 __func__, __LINE__, neo->page->phys_addr,
+			neo->page->cpu_virt_addr);
+
+	/* invalidate all pages */
+	for (i = 0; i < MMU_N_PAGE; i++) {
+		mmu_dir->mmu_info_cfg.pfn_page_write(neo->page, i, 0,
+				MMU_FLAG_INVALID);
+	}
+
+	/*
+	 * When non-UMA need to update the device memory after setting
+	 * it to 0
+	 */
+	if (mmu_dir->mmu_info_cfg.pfn_page_update)
+		mmu_dir->mmu_info_cfg.pfn_page_update(neo->page);
+
+	return neo;
+}
+
+/*
+ * mmu_create_directory - Create a directory entry based on a given directory
+ *			  configuration
+ * @mmu_info_ops: contains the functions to use to manage page table memory.
+ *		  Is copied and not modified.
+ *
+ * @warning Creation of the directory allocates memory - do not call while
+ * interrupts are disabled
+ *
+ * @return The opaque handle to the mmu_directory object on success
+ * @return An error value cast to (void *) on failure, with the value:
+ * @li -EINVAL if mmu_info configuration is NULL or does not
+ * contain function pointers
+ * @li -ENOMEM if an internal allocation failed
+ * @li -ENOMEM if the given mmu_pfn_page_alloc returned NULL
+ */
+struct mmu_directory *mmu_create_directory(const struct mmu_info *mmu_info_ops)
+{
+	struct mmu_directory *neo = NULL;
+	unsigned int i;
+
+	/*
+	 * invalid information in the directory config:
+	 * - invalid page allocator and dealloc (page write can be NULL)
+	 * - invalid virtual address representation
+	 * - invalid page size
+	 * - invalid MMU size
+	 */
+	if (!mmu_info_ops || !mmu_info_ops->pfn_page_alloc || !mmu_info_ops->pfn_page_free) {
+		pr_err("%s:%d invalid MMU configuration\n", __func__, __LINE__);
+		return (void *)(-EINVAL);
+	}
+
+	neo = kzalloc(sizeof(*neo), GFP_KERNEL);
+	if (!neo)
+		return (void *)(-ENOMEM);
+
+	neo->dir_page_table = kcalloc(MMU_N_TABLE, sizeof(struct mmu_page_cfg_table *),
+				      GFP_KERNEL);
+	if (!neo->dir_page_table) {
+		kfree(neo);
+		return (void *)(-ENOMEM);
+	}
+
+	memcpy(&neo->mmu_info_cfg, mmu_info_ops, sizeof(struct mmu_info));
+	if (!mmu_info_ops->pfn_page_write) {
+		pr_debug("%s:%d using default MMU write\n", __func__, __LINE__);
+		/* use internal function */
+		neo->mmu_info_cfg.pfn_page_write = &mmu_pg_wr;
+	}
+
+	neo->dir_page = mmu_info_ops->pfn_page_alloc(mmu_info_ops->alloc_ctx);
+	if (!neo->dir_page) {
+		kfree(neo->dir_page_table);
+		kfree(neo);
+		return (void *)(-ENOMEM);
+	}
+
+	pr_debug("%s:%d (phys page 0x%llx; CPU virt 0x%lx)\n", __func__,
+		 __LINE__, neo->dir_page->phys_addr,
+			neo->dir_page->cpu_virt_addr);
+	/* now we have a valid mmu_directory structure */
+
+	/* invalidate all entries */
+	for (i = 0; i < MMU_N_TABLE; i++) {
+		neo->mmu_info_cfg.pfn_page_write(neo->dir_page, i, 0,
+				MMU_FLAG_INVALID);
+	}
+
+	/* when non-UMA need to update the device memory */
+	if (neo->mmu_info_cfg.pfn_page_update)
+		neo->mmu_info_cfg.pfn_page_update(neo->dir_page);
+
+	return neo;
+}
+
+/*
+ * mmu_destroy_directory - Destroy the mmu_directory - assumes that the HW is
+ *			   not going to access the memory anymore
+ * @mmu_dir: pointer to the mmu directory to destroy
+ *
+ * Does not invalidate any memory because it assumes that nothing is in use
+ * anymore
+ */
+int mmu_destroy_directory(struct mmu_directory *mmu_dir)
+{
+	unsigned int i;
+
+	if (!mmu_dir) {
+		/* could be an assert */
+		pr_err("%s:%d mmu_dir is NULL\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (mmu_dir->num_mapping > 0)
+		/* mappings should have been destroyed! */
+		pr_err("%s:%d directory still has %u mapping attached to it\n",
+		       __func__, __LINE__, mmu_dir->num_mapping);
+	/*
+	 * not exiting because clearing the page table map is more
+	 * important than losing a few structures
+	 */
+
+	if (!mmu_dir->mmu_info_cfg.pfn_page_free || !mmu_dir->dir_page_table)
+		return -EINVAL;
+
+	pr_debug("%s:%d destroy MMU dir (phys page 0x%llx)\n",
+		 __func__, __LINE__, mmu_dir->dir_page->phys_addr);
+
+	/* first we destroy the directory entry */
+	mmu_dir->mmu_info_cfg.pfn_page_free(mmu_dir->dir_page);
+	mmu_dir->dir_page = NULL;
+
+	/* destroy every mapping that still exists */
+	for (i = 0; i < MMU_N_TABLE; i++) {
+		if (mmu_dir->dir_page_table[i]) {
+			mmu_pgt_destroy(mmu_dir->dir_page_table[i]);
+			mmu_dir->dir_page_table[i] = NULL;
+		}
+	}
+
+	kfree(mmu_dir->dir_page_table);
+	kfree(mmu_dir);
+	return 0;
+}
+
+/*
+ * mmu_directory_get_page - Get access to the page table structure used in the
+ *			    directory (to be able to write it to registers)
+ * @mmu_dir: pointer to the mmu directory. asserts if mmu_dir is NULL
+ *
+ * @return the page table structure used
+ */
+struct mmu_page_cfg *mmu_directory_get_page(struct mmu_directory *mmu_dir)
+{
+	if (!mmu_dir)
+		return NULL;
+
+	return mmu_dir->dir_page;
+}
+
+static struct mmu_map *mmu_directory_map(struct mmu_directory *mmu_dir,
+					 const struct mmu_heap_alloc *dev_va,
+					 unsigned int ui_map_flags,
+					 int (*phys_iter_next)(void *arg,
+							       unsigned long long *next),
+					 void *phys_iter_arg)
+{
+	unsigned int first_dir = 0;
+	unsigned int first_pg = 0;
+	unsigned int dir_off = 0;
+	unsigned int pg_off = 0;
+	unsigned int n_entries = 0;
+	unsigned int i;
+	unsigned int d;
+	const unsigned int duplicate = PAGE_SIZE / mmu_get_page_size();
+	int res = 0;
+	struct mmu_map *neo = NULL;
+	struct mmu_page_cfg_table **dir_pgtbl = NULL;
+
+	/*
+	 * in non UMA updates on pages needs to be done - store index of
+	 * directory entry pages to update
+	 */
+	unsigned int *to_update;
+	/*
+	 * number of pages in to_update (will be at least 1 for the first_pg to
+	 * update)
+	 */
+	unsigned int n_pgs_to_update = 0;
+	/*
+	 * to know if we also need to update the directory page (creation of new
+	 * page)
+	 */
+	unsigned char dir_modified = FALSE;
+
+	if (!mmu_dir || !dev_va || duplicate < 1)
+		return (void *)(-EINVAL);
+
+	dir_pgtbl = mmu_dir->dir_page_table;
+
+	n_entries = dev_va->alloc_size / PAGE_SIZE;
+	if (dev_va->alloc_size % MMU_PAGE_SIZE != 0 || n_entries == 0) {
+		pr_err("%s:%d invalid allocation size\n", __func__, __LINE__);
+		return (void *)(-EINVAL);
+	}
+
+	if ((ui_map_flags & MMU_FLAG_VALID) != 0) {
+		pr_err("%s:%d valid flag (0x%x) is set in the falgs 0x%x\n",
+		       __func__, __LINE__, MMU_FLAG_VALID, ui_map_flags);
+		return (void *)(-EINVAL);
+	}
+
+	/*
+	 * has to be dynamically allocated because it is bigger than 1k (max
+	 * stack in the kernel)
+	 * MMU_N_TABLE is 1024 for 4096B pages, that's a 4k allocation (1 page)
+	 *  - if it gets bigger, IMG_BIGALLOC may need to be used
+	 */
+	to_update = kcalloc(MMU_N_TABLE, sizeof(unsigned int), GFP_KERNEL);
+	if (!to_update)
+		return (void *)(-ENOMEM);
+
+	/* manage multiple page table mapping */
+
+	first_dir = mmu_dir_entry(dev_va->virt_addr);
+	first_pg = mmu_pg_entry(dev_va->virt_addr);
+
+	if (first_dir >= MMU_N_TABLE || first_pg >= MMU_N_PAGE) {
+		kfree(to_update);
+		return (void *)(-EINVAL);
+	}
+
+	/* verify that the pages that should be used are available */
+	dir_off = first_dir;
+	pg_off = first_pg;
+
+	/*
+	 * loop over the number of entries given by the CPU allocator; the CPU
+	 * page size can be greater than the MMU page size, so entries may need
+	 * to be "duplicated" by creating a fake physical address
+	 */
+	for (i = 0; i < n_entries * duplicate; i++) {
+		if (pg_off >= MMU_N_PAGE) {
+			dir_off++; /* move to next directory */
+			if (dir_off >= MMU_N_TABLE) {
+				res = -EINVAL;
+				break;
+			}
+			pg_off = 0; /* using its first page */
+		}
+
+		/*
+		 * if dir_pgtbl[dir_off] == NULL it is not yet
+		 * allocated, meaning all its entries are available
+		 */
+		if (dir_pgtbl[dir_off]) {
+			/*
+			 * inside a pagetable - verify that the required offset
+			 * is invalid
+			 */
+			struct mmu_page_cfg_table *tbl = dir_pgtbl[dir_off];
+			unsigned int *page_mem = (unsigned int *)tbl->page->cpu_virt_addr;
+
+			if ((page_mem[pg_off] & MMU_FLAG_VALID) != 0) {
+				pr_err("%s:%d one of the required page is currently in use\n",
+				       __func__, __LINE__);
+				res = -EPERM;
+				break;
+			}
+		}
+		/* PageTable struct exists */
+		pg_off++;
+	} /* for all needed entries */
+
+	/* one entry was not invalid or not enough pages were given */
+	if (res != 0) {
+		/*
+		 * message already printed
+		 * -EPERM when an entry is not invalid
+		 * -EINVAL when not enough pages are given
+		 *  (or too many)
+		 */
+		kfree(to_update);
+		return (void *)(unsigned long)(res);
+	}
+
+	neo = kmalloc(sizeof(*neo), GFP_KERNEL);
+	if (!neo) {
+		kfree(to_update);
+		return (void *)(-ENOMEM);
+	}
+	neo->mmu_dir = mmu_dir;
+	/* copy the given device virtual address allocation info */
+	neo->dev_virt_addr = *dev_va;
+	neo->used_flag = ui_map_flags;
+
+	/* we now know that all pages are available */
+	dir_off = first_dir;
+	pg_off = first_pg;
+
+	to_update[n_pgs_to_update] = first_dir;
+	n_pgs_to_update++;
+
+	for (i = 0; i < n_entries; i++) {
+		unsigned long long cur_phys_addr;
+
+		if (phys_iter_next(phys_iter_arg, &cur_phys_addr) != 0) {
+			pr_err("%s:%d not enough entries in physical address array\n",
+			       __func__, __LINE__);
+			kfree(neo);
+			kfree(to_update);
+			return (void *)(-EBUSY);
+		}
+		for (d = 0; d < duplicate; d++) {
+			if (pg_off >= MMU_N_PAGE) {
+				/* move to next directory */
+				dir_off++;
+				/* using its first page */
+				pg_off = 0;
+
+				to_update[n_pgs_to_update] = dir_off;
+				n_pgs_to_update++;
+			}
+
+			/* this page table object does not exist, create it */
+			if (!dir_pgtbl[dir_off]) {
+				dir_pgtbl[dir_off] = mmu_pgt_create(mmu_dir);
+				if (IS_ERR_VALUE((unsigned long)dir_pgtbl[dir_off])) {
+					dir_pgtbl[dir_off] = NULL;
+					goto cleanup_fail;
+				}
+				/*
+				 * make this page table valid
+				 * should be dir_off
+				 */
+				mmu_dir->mmu_info_cfg.pfn_page_write(mmu_dir->dir_page,
+						dir_off,
+						dir_pgtbl[dir_off]->page->phys_addr,
+						MMU_FLAG_VALID);
+				dir_modified = TRUE;
+			}
+
+			/*
+			 * map this particular page in the page table
+			 * use d*(MMU page size) to add additional entries from
+			 * the given physical address with the correct offset
+			 * for the MMU
+			 */
+			mmu_dir->mmu_info_cfg.pfn_page_write(dir_pgtbl[dir_off]->page,
+							     pg_off,
+							     cur_phys_addr + d *
+							     mmu_get_page_size(),
+							     neo->used_flag |
+							     MMU_FLAG_VALID);
+			dir_pgtbl[dir_off]->valid_entries++;
+
+			pg_off++;
+		} /* for duplicate */
+	} /* for entries */
+
+	neo->n_entries = n_entries * duplicate;
+	/* one more mapping is related to this directory */
+	mmu_dir->num_mapping++;
+
+	/* if non UMA we need to update device memory */
+	if (mmu_dir->mmu_info_cfg.pfn_page_update) {
+		while (n_pgs_to_update > 0) {
+			unsigned int idx = to_update[n_pgs_to_update - 1];
+			struct mmu_page_cfg_table *tbl = dir_pgtbl[idx];
+
+			mmu_dir->mmu_info_cfg.pfn_page_update(tbl->page);
+			n_pgs_to_update--;
+		}
+		if (dir_modified)
+			mmu_dir->mmu_info_cfg.pfn_page_update(mmu_dir->dir_page);
+	}
+
+	kfree(to_update);
+	return neo;
+
+cleanup_fail:
+	pr_err("%s:%d failed to create a non-existing page table\n", __func__, __LINE__);
+
+	/*
+	 * invalidate all already mapped pages -
+	 * do not destroy the created pages
+	 */
+	while (i > 1) {
+		if (d == 0) {
+			i--;
+			d = duplicate;
+		}
+		d--;
+
+		if (pg_off == 0) {
+			pg_off = MMU_N_PAGE;
+			if (!dir_off)
+				continue;
+			dir_off--;
+		}
+
+		pg_off--;
+
+		/* it should have been used before */
+		if (!dir_pgtbl[dir_off])
+			continue;
+
+		mmu_dir->mmu_info_cfg.pfn_page_write(dir_pgtbl[dir_off]->page,
+				pg_off, 0,
+				MMU_FLAG_INVALID);
+		dir_pgtbl[dir_off]->valid_entries--;
+	}
+
+	kfree(neo);
+	kfree(to_update);
+	return (void *)(-ENOMEM);
+}
+
+/*
+ * with sg
+ */
+struct sg_phys_iter {
+	void *sgl;
+	unsigned int offset;
+};
+
+static int sg_phys_iter_next(void *arg, unsigned long long *next)
+{
+	struct sg_phys_iter *iter = arg;
+
+	if (!iter->sgl)
+		return -ENOENT;
+
+	*next = sg_phys(iter->sgl) + iter->offset; /* phys_addr to dma_addr? */
+	iter->offset += PAGE_SIZE;
+
+	if (iter->offset == img_mmu_get_sgl_length(iter->sgl)) {
+		iter->sgl = sg_next(iter->sgl);
+		iter->offset = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * mmu_directory_map_sg - Create a page table mapping for a list of physical
+ *			  pages and device virtual address
+ *
+ * @mmu_dir: directory to use for the mapping
+ * @phys_page_sg: sorted array of physical addresses (ascending order). The
+ *		  number of elements is dev_va->alloc_size/MMU_PAGE_SIZE
+ * @note This array can potentially be big, the caller may need to use vmalloc
+ * if running the linux kernel (e.g. mapping a 1080p NV12 is 760 entries, 6080
+ * Bytes - 2 CPU pages needed, fine with kmalloc; 4k NV12 is 3038 entries,
+ * 24304 Bytes - 6 CPU pages needed, kmalloc would try to find 8 contiguous
+ * pages which may be problematic if memory is fragmented)
+ * @dev_va: associated device virtual address. Given structure is copied
+ * @map_flag: flags to apply on the page (typically 0x2 for Write Only,
+ *	      0x4 for Read Only) - the flag should not set bit 1 as 0x1 is the
+ *	      valid flag.
+ *
+ * @warning Mapping can cause memory allocation (missing pages) - do not call
+ * while interrupts are disabled
+ *
+ * @return The opaque handle to the mmu_map object and result to 0
+ * @return (void *) in case of an error with the following values:
+ * @li -EINVAL if the allocation size is not a multiple of MMU_PAGE_SIZE,
+ *     if the given list of pages is too long or not long enough for the
+ *     mapping, or if the given flags set the valid bit
+ * @li -EPERM if the virtual memory is already mapped
+ * @li -ENOMEM if an internal allocation failed
+ * @li -ENOMEM if a page creation failed
+ */
+struct mmu_map *mmu_directory_map_sg(struct mmu_directory *mmu_dir,
+				     void *phys_page_sg,
+				     const struct mmu_heap_alloc *dev_va,
+				     unsigned int map_flag)
+{
+	struct sg_phys_iter arg = { phys_page_sg };
+
+	return mmu_directory_map(mmu_dir, dev_va, map_flag,
+				 sg_phys_iter_next, &arg);
+}
+
+/*
+ * mmu_directory_unmap - Un-map the mapped pages (invalidate their entries) and
+ *			 destroy the mapping object
+ * @map: pointer to the pages to un-map
+ *
+ * This does not destroy the created Page Table (even if they are becoming
+ * un-used) and does not change the Directory valid bits.
+ *
+ * @return 0 on success, negative (-EINVAL/-ENOMEM) on error
+ */
+int mmu_directory_unmap(struct mmu_map *map)
+{
+	unsigned int first_dir = 0;
+	unsigned int first_pg = 0;
+	unsigned int dir_offset = 0;
+	unsigned int pg_offset = 0;
+	unsigned int i;
+	struct mmu_directory *mmu_dir = NULL;
+
+	/*
+	 * in non UMA updates on pages needs to be done - store index of
+	 * directory entry pages to update
+	 */
+	unsigned int *to_update;
+	unsigned int n_pgs_to_update = 0;
+
+	if (!map || map->n_entries <= 0 || !map->mmu_dir)
+		return -EINVAL;
+
+	mmu_dir = map->mmu_dir;
+
+	/*
+	 * has to be dynamically allocated because it is bigger than 1k (max
+	 * stack in the kernel)
+	 */
+	to_update = kcalloc(MMU_N_TABLE, sizeof(unsigned int), GFP_KERNEL);
+	if (!to_update)
+		return -ENOMEM;
+
+	first_dir = mmu_dir_entry(map->dev_virt_addr.virt_addr);
+	first_pg = mmu_pg_entry(map->dev_virt_addr.virt_addr);
+
+	/* verify that the pages that should be used are available */
+	dir_offset = first_dir;
+	pg_offset = first_pg;
+
+	to_update[n_pgs_to_update] = first_dir;
+	n_pgs_to_update++;
+
+	for (i = 0; i < map->n_entries; i++) {
+		if (pg_offset >= MMU_N_PAGE) {
+			/* move to next directory */
+			dir_offset++;
+			/* using its first page */
+			pg_offset = 0;
+
+			to_update[n_pgs_to_update] = dir_offset;
+			n_pgs_to_update++;
+		}
+
+		/*
+		 * if the page table object no longer exists, something
+		 * destroyed it while the mapping was still using it - skip it
+		 */
+		if (mmu_dir->dir_page_table[dir_offset]) {
+			mmu_dir->mmu_info_cfg.pfn_page_write
+				(mmu_dir->dir_page_table[dir_offset]->page,
+				 pg_offset, 0,
+				 MMU_FLAG_INVALID);
+			mmu_dir->dir_page_table[dir_offset]->valid_entries--;
+		}
+
+		pg_offset++;
+	}
+
+	mmu_dir->num_mapping--;
+
+	if (mmu_dir->mmu_info_cfg.pfn_page_update)
+		while (n_pgs_to_update > 0) {
+			unsigned int idx = to_update[n_pgs_to_update - 1];
+			struct mmu_page_cfg_table *tbl = mmu_dir->dir_page_table[idx];
+
+			mmu_dir->mmu_info_cfg.pfn_page_update(tbl->page);
+			n_pgs_to_update--;
+		}
+
+	/* mapping does not own the given virtual address */
+	kfree(map);
+	kfree(to_update);
+	return 0;
+}
+
+unsigned int mmu_directory_get_pagetable_entry(struct mmu_directory *mmu_dir,
+					       unsigned long dev_virt_addr)
+{
+	unsigned int dir_entry = 0;
+	unsigned int table_entry = 0;
+	struct mmu_page_cfg_table *tbl;
+	struct mmu_page_cfg_table **dir_pgtbl = NULL;
+	unsigned int *page_mem;
+
+	if (!mmu_dir) {
+		pr_err("mmu directory table is NULL\n");
+		return 0xFFFFFF;
+	}
+
+	dir_pgtbl = mmu_dir->dir_page_table;
+
+	dir_entry = mmu_dir_entry(dev_virt_addr);
+	table_entry = mmu_pg_entry(dev_virt_addr);
+
+	tbl = dir_pgtbl[dir_entry];
+	if (!tbl) {
+		pr_err("page table entry is NULL\n");
+		return 0xFFFFFF;
+	}
+
+	page_mem = (unsigned int *)tbl->page->cpu_virt_addr;
+
+#if defined(DEBUG_DECODER_DRIVER) || defined(DEBUG_ENCODER_DRIVER)
+	pr_info("Page table value@dir_entry:table_entry[%d : %d] = %x\n",
+		dir_entry, table_entry, page_mem[table_entry]);
+#endif
+
+	return page_mem[table_entry];
+}
diff --git a/drivers/staging/media/vxd/common/imgmmu.h b/drivers/staging/media/vxd/common/imgmmu.h
new file mode 100644
index 000000000000..b35256d09e24
--- /dev/null
+++ b/drivers/staging/media/vxd/common/imgmmu.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC MMU Library
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef IMG_DEC_MMU_MMU_H
+#define IMG_DEC_MMU_MMU_H
+
+#include <linux/types.h>
+
+#ifndef MMU_PHYS_SIZE
+/* @brief MMU physical address size in bits */
+#define MMU_PHYS_SIZE 40
+#endif
+
+#ifndef MMU_VIRT_SIZE
+/* @brief MMU virtual address size in bits */
+#define MMU_VIRT_SIZE 32
+#endif
+
+#ifndef MMU_PAGE_SIZE
+/* @brief Page size in bytes */
+#define MMU_PAGE_SIZE 4096u
+#define MMU_PAGE_SHIFT 12
+#define MMU_DIR_SHIFT 22
+#endif
+
+#if MMU_VIRT_SIZE == 32
+/* @brief max number of page tables that can be stored in the directory */
+#define MMU_N_TABLE (MMU_PAGE_SIZE / 4u)
+/* @brief max number of page mappings in a page table */
+#define MMU_N_PAGE (MMU_PAGE_SIZE / 4u)
+#endif
+
+/* @brief Memory flags used to mark a page mapping as valid/invalid */
+#define MMU_FLAG_VALID 0x1
+#define MMU_FLAG_INVALID 0x0
+
+/*
+ * This type defines MMU variant.
+ */
+enum mmu_etype {
+	MMU_TYPE_NONE = 0,
+	MMU_TYPE_32BIT,
+	MMU_TYPE_36BIT,
+	MMU_TYPE_40BIT,
+	MMU_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* @brief Page offset mask in virtual address - bottom bits */
+static const unsigned long VIRT_PAGE_OFF_MASK = ((1 << MMU_PAGE_SHIFT) - 1);
+/* @brief Page table index mask in virtual address - middle bits */
+static const unsigned long VIRT_PAGE_TBL_MASK =
+	(((1 << MMU_DIR_SHIFT) - 1) & ~(((1 << MMU_PAGE_SHIFT) - 1)));
+/* @brief Directory index mask in virtual address - high bits */
+static const unsigned long VIRT_DIR_IDX_MASK = (~((1 << MMU_DIR_SHIFT) - 1));
+
+/*
+ * struct mmu_heap_alloc - information about a virtual mem heap allocation
+ * @virt_addr: pointer to start of the allocation
+ * @alloc_size: size in bytes
+ */
+struct mmu_heap_alloc {
+	unsigned long	virt_addr;
+	unsigned long	alloc_size;
+};
+
+/*
+ * struct mmu_page_cfg - mmu_page configuration
+ * @phys_addr: physical address - unsigned long long is used to support extended physical
+ *	       address on 32bit system
+ * @cpu_virt_addr: CPU virtual address pointer
+ */
+struct mmu_page_cfg {
+	unsigned long long	phys_addr;
+	unsigned long	cpu_virt_addr;
+};
+
+/*
+ * typedef mmu_pfn_page_alloc - page table allocation function
+ *
+ * Pointer to a function implemented by the used allocator to create 1
+ * page table (used for the MMU mapping - directory page and mapping page)
+ *
+ * Return:
+ * * A populated mmu_page_cfg structure with the result of the page alloc.
+ * * NULL if the allocation failed.
+ */
+typedef struct mmu_page_cfg *(*mmu_pfn_page_alloc) (void *);
+
+/*
+ * typedef mmu_pfn_page_free
+ * @arg1: pointer to the mmu_page_cfg that is allocated using mmu_pfn_page_alloc
+ *
+ * Pointer to a function to free the allocated page table used for MMU mapping.
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_free) (struct mmu_page_cfg *arg1);
+
+/*
+ * typedef mmu_pfn_page_update
+ * @arg1: pointer to the mmu_page_cfg that is allocated using mmu_pfn_page_alloc
+ *
+ * Pointer to a function to update Device memory on non Unified Memory
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_update) (struct mmu_page_cfg *arg1);
+
+/*
+ * typedef mmu_pfn_page_write
+ * @mmu_page: mmu page configuration to be written
+ * @offset: offset in entries (32b word)
+ * @pa_to_write: physical address to write
+ * @flags: flags bottom part of the entry used as flags for the MMU (including
+ *	   valid flag)
+ *
+ * Pointer to a function to write to a device address
+ *
+ * @return void
+ */
+typedef void (*mmu_pfn_page_write) (struct mmu_page_cfg *mmu_page,
+				    unsigned int offset,
+				    unsigned long long pa_to_write, unsigned int flags);
+
+/*
+ * struct mmu_info
+ * @pfn_page_alloc: function pointer for allocating a physical page used in
+ *		    MMU mapping
+ * @alloc_ctx: allocation context handler
+ * @pfn_page_free: function pointer for freeing a physical page used in
+ *		   MMU mapping
+ * @pfn_page_write: function pointer to write a physical address onto a page.
+ *		    If NULL, then internal function is used. Internal function
+ *		    assumes that MMU_PHYS_SIZE is the MMU size.
+ * @pfn_page_update: function pointer to update a physical page on device if
+ *		     non UMA.
+ */
+struct mmu_info {
+	mmu_pfn_page_alloc	pfn_page_alloc;
+	void			*alloc_ctx;
+	mmu_pfn_page_free	pfn_page_free;
+	mmu_pfn_page_write	pfn_page_write;
+	mmu_pfn_page_update	pfn_page_update;
+};
+
+/*
+ * mmu_get_page_size() - Access the compilation specified page size of the
+ *			 MMU (in Bytes)
+ */
+static inline unsigned long mmu_get_page_size(void)
+{
+	return MMU_PAGE_SIZE;
+}
+
+struct mmu_directory *mmu_create_directory(const struct mmu_info *mmu_info_ops);
+int mmu_destroy_directory(struct mmu_directory *mmu_dir);
+
+struct mmu_page_cfg *mmu_directory_get_page(struct mmu_directory *mmu_dir);
+
+struct mmu_map *mmu_directory_map_sg(struct mmu_directory *mmu_dir,
+				     void *phys_page_sg,
+				     const struct mmu_heap_alloc *dev_va,
+				     unsigned int map_flag);
+int mmu_directory_unmap(struct mmu_map *map);
+
+unsigned int mmu_directory_get_pagetable_entry(struct mmu_directory *mmu_dir,
+					       unsigned long dev_virt_addr);
+
+#endif /* IMG_DEC_MMU_MMU_H */
-- 
2.17.1



* [PATCH 03/30] v4l: vxd-dec: Create vxd_dec Mem Manager helper library
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
  2021-08-18 14:10 ` [PATCH 01/30] dt-bindings: Add binding for img,d5500-vxd for DRA8x sidraya.bj
  2021-08-18 14:10 ` [PATCH 02/30] v4l: vxd-dec: Create mmu programming helper library sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-24 13:34   ` Dan Carpenter
  2021-08-18 14:10 ` [PATCH 04/30] v4l: vxd-dec: Add vxd " sidraya.bj
                   ` (28 subsequent siblings)
  31 siblings, 1 reply; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

The memory manager helper library provides functions for managing
the memory context of a decode stream. Each stream will have its
own memory context with associated mmu context and heap
allocations. The memory manager tracks the allocations and mappings
per context, as well as providing a wrapper around the MMU library.
It also provides functions for the driver to query information about
the page table directory for a particular memory context.

In addition, the memory manager provides the ability to plug in
different heaps (unified, carveout, dmabuf, etc.) that the caller
can use when doing memory allocations.

By default, the "unified" heap functionality is supported. No other
types of heaps are supported at this time, though the framework is
present to add more heap types in the future, if needed. This
heap is used only for allocating internal buffers used for communication
with the hardware, and for loading the firmware.

Functions are provided for creating/destroying a memory context,
creating/destroying an MMU context, mapping and unmapping buffers in
the device MMU, allocating and freeing buffers from specified available
heaps, and retrieving information about those allocations.
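
As an illustration only (not part of this series), below is a minimal
probe-time sketch using the memory manager API added here; error
handling is abbreviated and the context/heap teardown counterparts are
omitted. The example_mem_man_setup() wrapper is a hypothetical
placeholder, not a function in this patch:

	/* hedged usage sketch: register the unified heap, create a
	 * per-stream memory context */
	static int example_mem_man_setup(struct mem_ctx **ctx, int *heap_id)
	{
		struct heap_config cfg = {
			.type = MEM_HEAP_TYPE_UNIFIED,
			/* .options and .to_dev_addr are heap-specific */
		};
		int ret;

		ret = img_mem_add_heap(&cfg, heap_id);
		if (ret)
			return ret;

		ret = img_mem_create_ctx(ctx);
		if (ret) {
			img_mem_del_heap(*heap_id);
			return ret;
		}

		/* buffers can now be allocated from *heap_id against *ctx */
		return 0;
	}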

Signed-off-by: Buddy Liong <buddy.liong@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |    3 +
 .../staging/media/vxd/common/img_mem_man.c    | 1124 +++++++++++++++++
 .../staging/media/vxd/common/img_mem_man.h    |  231 ++++
 .../media/vxd/common/img_mem_unified.c        |  276 ++++
 4 files changed, 1634 insertions(+)
 create mode 100644 drivers/staging/media/vxd/common/img_mem_man.c
 create mode 100644 drivers/staging/media/vxd/common/img_mem_man.h
 create mode 100644 drivers/staging/media/vxd/common/img_mem_unified.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 2e921650a14c..150272927839 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19537,6 +19537,9 @@ M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+F:	drivers/staging/media/vxd/common/img_mem_man.c
+F:	drivers/staging/media/vxd/common/img_mem_man.h
+F:	drivers/staging/media/vxd/common/img_mem_unified.c
 F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
 
diff --git a/drivers/staging/media/vxd/common/img_mem_man.c b/drivers/staging/media/vxd/common/img_mem_man.c
new file mode 100644
index 000000000000..cf9792d9a1a9
--- /dev/null
+++ b/drivers/staging/media/vxd/common/img_mem_man.c
@@ -0,0 +1,1124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC Memory Manager
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "imgmmu.h"
+#include "img_mem_man.h"
+
+#define VXD_MMU_SHIFT 8 /* assume 40-bit MMU */
+/* heaps ids (global) */
+#define MIN_HEAP 1
+#define MAX_HEAP 16
+
+/*
+ * struct mem_man - the device memory management
+ * @dev: pointer to the device
+ * @heaps: idr list of heap for the device memory manager
+ * @mem_ctxs: contains lists of mem_ctx
+ * @mutex: mutex for this device
+ */
+struct mem_man {
+	void *dev;
+	struct idr *heaps;
+	struct list_head mem_ctxs;
+	struct mutex *mutex; /* mutex for this device */
+};
+
+static struct mem_man mem_man_data = {0};
+
+/**
+ * struct mmu_page - the mmu page information for the buffer
+ * @buffer: buffer pointer for the particular mmu_page
+ * @page_cfg: mmu page configuration of physical and virtual addr
+ * @addr_shift: address shifting information
+ */
+struct mmu_page {
+	struct buffer *buffer;
+	struct mmu_page_cfg page_cfg;
+	unsigned int addr_shift;
+};
+
+static void _img_mem_free(struct buffer *buffer);
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping);
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx);
+
+#if defined(DEBUG_DECODER_DRIVER)
+static const char *get_heap_name(enum heap_type type)
+{
+	switch (type) {
+	case MEM_HEAP_TYPE_UNIFIED:
+		return "unified";
+	default:
+		return "unknown";
+	}
+}
+#endif
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	int (*init_fn)(const struct heap_config *heap_cfg, struct heap *heap);
+	int ret;
+
+	switch (heap_cfg->type) {
+	case MEM_HEAP_TYPE_UNIFIED:
+		init_fn = img_mem_unified_init;
+		break;
+	default:
+		dev_err(mem_man->dev, "%s: heap type %d unknown\n", __func__,
+			heap_cfg->type);
+		return -EINVAL;
+	}
+
+	heap = kmalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+
+	ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	if (ret)
+		goto lock_failed;
+
+	ret = idr_alloc(mem_man->heaps, heap, MIN_HEAP, MAX_HEAP, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(mem_man->dev, "%s: idr_alloc failed\n", __func__);
+		goto alloc_id_failed;
+	}
+
+	heap->id = ret;
+	heap->type = heap_cfg->type;
+	heap->options = heap_cfg->options;
+	heap->to_dev_addr = heap_cfg->to_dev_addr;
+	heap->priv = NULL;
+
+	ret = init_fn(heap_cfg, heap);
+	if (ret) {
+		dev_err(mem_man->dev, "%s: heap init failed\n", __func__);
+		goto heap_init_failed;
+	}
+
+	*heap_id = heap->id;
+	mutex_unlock(mem_man->mutex);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_info(mem_man->dev, "%s created heap %d type %d (%s)\n",
+		 __func__, *heap_id, heap_cfg->type, get_heap_name(heap->type));
+#endif
+	return 0;
+
+heap_init_failed:
+	idr_remove(mem_man->heaps, heap->id);
+alloc_id_failed:
+	mutex_unlock(mem_man->mutex);
+lock_failed:
+	kfree(heap);
+	return ret;
+}
+
+static void _img_mem_del_heap(struct heap *heap)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	if (heap->ops->destroy)
+		heap->ops->destroy(heap);
+
+	idr_remove(mem_man->heaps, heap->id);
+}
+
+void img_mem_del_heap(int heap_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	heap = idr_find(mem_man->heaps, heap_id);
+	if (!heap) {
+		dev_warn(mem_man->dev, "%s heap %d not found!\n", __func__,
+			 heap_id);
+		mutex_unlock(mem_man->mutex);
+		return;
+	}
+
+	_img_mem_del_heap(heap);
+
+	mutex_unlock(mem_man->mutex);
+
+	kfree(heap);
+}
+
+int img_mem_create_ctx(struct mem_ctx **new_ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mem_ctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->buffers = kzalloc(sizeof(*ctx->buffers), GFP_KERNEL);
+	if (!ctx->buffers) {
+		kfree(ctx);
+		return -ENOMEM;
+	}
+	idr_init(ctx->buffers);
+
+	INIT_LIST_HEAD(&ctx->mmu_ctxs);
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	list_add(&ctx->mem_man_entry, &mem_man->mem_ctxs);
+	mutex_unlock(mem_man->mutex);
+
+	*new_ctx = ctx;
+	return 0;
+}
+
+static void _img_mem_destroy_ctx(struct mem_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int buff_id;
+
+	/* free derelict mmu contexts */
+	while (!list_empty(&ctx->mmu_ctxs)) {
+		struct mmu_ctx *mc;
+
+		mc = list_first_entry(&ctx->mmu_ctxs,
+				      struct mmu_ctx, mem_ctx_entry);
+		dev_warn(mem_man->dev, "%s: found derelict mmu context %p\n",
+			 __func__, mc);
+		_img_mmu_ctx_destroy(mc);
+		kfree(mc);
+	}
+
+	/* free derelict buffers */
+	buff_id = MEM_MAN_MIN_BUFFER;
+	buffer = idr_get_next(ctx->buffers, &buff_id);
+	while (buffer) {
+		dev_warn(mem_man->dev, "%s: found derelict buffer %d\n",
+			 __func__, buff_id);
+		if (buffer->heap)
+			_img_mem_free(buffer);
+		else
+			idr_remove(ctx->buffers, buffer->id);
+		kfree(buffer);
+		buff_id = MEM_MAN_MIN_BUFFER;
+		buffer = idr_get_next(ctx->buffers, &buff_id);
+	}
+
+	idr_destroy(ctx->buffers);
+	kfree(ctx->buffers);
+	__list_del_entry(&ctx->mem_man_entry);
+}
+
+void img_mem_destroy_ctx(struct mem_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	_img_mem_destroy_ctx(ctx);
+	mutex_unlock(mem_man->mutex);
+
+	kfree(ctx);
+}
+
+static int _img_mem_alloc(void *device, struct mem_ctx *ctx,
+			  struct heap *heap, unsigned long size,
+			  enum mem_attr attr, struct buffer **buffer_new)
+{
+	struct buffer *buffer;
+	int ret;
+
+	if (size == 0) {
+		dev_err(device, "%s: buffer size is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!heap->ops || !heap->ops->alloc) {
+		dev_err(device, "%s: no alloc function in heap %d!\n",
+			__func__, heap->id);
+		return -EINVAL;
+	}
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	ret = idr_alloc(ctx->buffers, buffer,
+			MEM_MAN_MIN_BUFFER, MEM_MAN_MAX_BUFFER, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(device, "%s: idr_alloc failed\n", __func__);
+		goto idr_alloc_failed;
+	}
+
+	buffer->id = ret;
+	buffer->request_size = size;
+	buffer->actual_size = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
+	buffer->device = device;
+	buffer->mem_ctx = ctx;
+	buffer->heap = heap;
+	INIT_LIST_HEAD(&buffer->mappings);
+	buffer->kptr = NULL;
+	buffer->priv = NULL;
+
+	ret = heap->ops->alloc(device, heap, buffer->actual_size, attr,
+		buffer);
+	if (ret) {
+		dev_err(device, "%s: heap %d alloc failed\n", __func__,
+			heap->id);
+		goto heap_alloc_failed;
+	}
+
+	*buffer_new = buffer;
+
+	dev_dbg(device, "%s heap %p ctx %p created buffer %d (%p) actual_size %zu\n",
+		__func__, heap, ctx, buffer->id, buffer, buffer->actual_size);
+	return 0;
+
+heap_alloc_failed:
+	idr_remove(ctx->buffers, buffer->id);
+idr_alloc_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int img_mem_alloc(void *device, struct mem_ctx *ctx, int heap_id,
+		  unsigned long size, enum mem_attr attr, int *buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	struct buffer *buffer;
+	int ret;
+
+	dev_dbg(device, "%s heap %d ctx %p size %zu\n", __func__, heap_id,
+		ctx, size);
+
+	ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	if (ret)
+		return ret;
+
+	heap = idr_find(mem_man->heaps, heap_id);
+	if (!heap) {
+		dev_err(device, "%s: heap id %d not found\n", __func__,
+			heap_id);
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_alloc(device, ctx, heap, size, attr, &buffer);
+	if (ret) {
+		mutex_unlock(mem_man->mutex);
+		return ret;
+	}
+
+	*buf_id = buffer->id;
+	mutex_unlock(mem_man->mutex);
+
+	dev_dbg(device, "%s heap %d ctx %p created buffer %d (%p) size %zu\n",
+		__func__, heap_id, ctx, *buf_id, buffer, size);
+	return ret;
+}
+
+static int _img_mem_import(void *device, struct mem_ctx *ctx,
+			   unsigned long size, enum mem_attr attr, struct buffer **buffer_new)
+{
+	struct buffer *buffer;
+	int ret;
+
+	if (size == 0) {
+		dev_err(device, "%s: buffer size is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	ret = idr_alloc(ctx->buffers, buffer,
+			MEM_MAN_MIN_BUFFER, MEM_MAN_MAX_BUFFER, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(device, "%s: idr_alloc failed\n", __func__);
+		goto idr_alloc_failed;
+	}
+
+	buffer->id = ret;
+	buffer->request_size = size;
+	buffer->actual_size = ((size + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
+	buffer->device = device;
+	buffer->mem_ctx = ctx;
+	buffer->heap = NULL;
+	INIT_LIST_HEAD(&buffer->mappings);
+	buffer->kptr = NULL;
+	buffer->priv = NULL;
+
+	*buffer_new = buffer;
+
+	dev_dbg(device, "%s ctx %p created buffer %d (%p) actual_size %zu\n",
+		__func__, ctx, buffer->id, buffer, buffer->actual_size);
+	return 0;
+
+idr_alloc_failed:
+	kfree(buffer);
+	return ret;
+}
+
+int img_mem_import(void *device, struct mem_ctx *ctx,
+		   unsigned long size, enum mem_attr attr, int *buf_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+
+	dev_dbg(device, "%s ctx %p size %zu\n", __func__, ctx, size);
+
+	ret = mutex_lock_interruptible_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	if (ret)
+		return ret;
+
+	ret = _img_mem_import(device, ctx, size, attr, &buffer);
+	if (ret) {
+		mutex_unlock(mem_man->mutex);
+		return ret;
+	}
+
+	*buf_id = buffer->id;
+	mutex_unlock(mem_man->mutex);
+
+	dev_dbg(device, "%s ctx %p created buffer %d (%p) size %zu\n",
+		__func__, ctx, *buf_id, buffer, size);
+	return ret;
+}
+
+static void _img_mem_free(struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	struct heap *heap = buffer->heap;
+	struct mem_ctx *ctx = buffer->mem_ctx;
+
+	if (!heap->ops || !heap->ops->free) {
+		dev_err(dev, "%s: no free function in heap %d!\n",
+			__func__, heap->id);
+		return;
+	}
+
+	while (!list_empty(&buffer->mappings)) {
+		struct mmu_ctx_mapping *map;
+
+		map = list_first_entry(&buffer->mappings,
+				       struct mmu_ctx_mapping, buffer_entry);
+		dev_warn(dev, "%s: found mapping for buffer %d (size %zu)\n",
+			 __func__, map->buffer->id, map->buffer->actual_size);
+
+		_img_mmu_unmap(map);
+
+		kfree(map);
+	}
+
+	heap->ops->free(heap, buffer);
+
+	idr_remove(ctx->buffers, buffer->id);
+}
+
+void img_mem_free(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		mutex_unlock(mem_man->mutex);
+		return;
+	}
+
+	_img_mem_free(buffer);
+
+	mutex_unlock(mem_man->mutex);
+
+	kfree(buffer);
+}
+
+void img_mem_free_bufid(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		mutex_unlock(mem_man->mutex);
+		return;
+	}
+
+	idr_remove(ctx->buffers, buffer->id);
+
+	mutex_unlock(mem_man->mutex);
+
+	kfree(buffer);
+}
+
+static int _img_mem_map_km(struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	struct heap *heap = buffer->heap;
+
+	if (!heap->ops || !heap->ops->map_km) {
+		dev_err(dev, "%s: no map_km in heap %d!\n", __func__, heap->id);
+		return -EINVAL;
+	}
+
+	return heap->ops->map_km(heap, buffer);
+}
+
+int img_mem_map_km(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	int ret;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	ret = _img_mem_map_km(buffer);
+
+	mutex_unlock(mem_man->mutex);
+
+	return ret;
+}
+
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+	void *kptr;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+			buff_id);
+		mutex_unlock(mem_man->mutex);
+		return NULL;
+	}
+	kptr = buffer->kptr;
+	mutex_unlock(mem_man->mutex);
+	return kptr;
+}
+
+static void _img_mem_sync_cpu_to_device(struct buffer *buffer)
+{
+	struct heap *heap = buffer->heap;
+
+	if (heap->ops && heap->ops->sync_cpu_to_dev)
+		heap->ops->sync_cpu_to_dev(heap, buffer);
+
+	/* sync to device memory */
+	mb();
+}
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+			buff_id);
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mem_sync_cpu_to_device(buffer);
+
+	mutex_unlock(mem_man->mutex);
+	return 0;
+}
+
+static void _img_mem_sync_device_to_cpu(struct buffer *buffer)
+{
+	struct heap *heap = buffer->heap;
+
+	if (heap->ops && heap->ops->sync_dev_to_cpu)
+		heap->ops->sync_dev_to_cpu(heap, buffer);
+}
+
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct buffer *buffer;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mem_man->dev, "%s: buffer id %d not found\n", __func__,
+			buff_id);
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mem_sync_device_to_cpu(buffer);
+
+	mutex_unlock(mem_man->mutex);
+	return 0;
+}
+
+static struct mmu_page_cfg *mmu_page_alloc(void *arg)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx *mmu_ctx = arg;
+	struct mmu_page *page;
+	struct buffer *buffer;
+	struct heap *heap;
+	int ret;
+
+	dev_dbg(mmu_ctx->device, "%s:%d arg %p\n", __func__, __LINE__, arg);
+
+	WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+	page = kzalloc(sizeof(*page), GFP_KERNEL);
+	if (!page)
+		return NULL;
+
+	ret = _img_mem_alloc(mmu_ctx->device, mmu_ctx->mem_ctx,
+			     mmu_ctx->heap, PAGE_SIZE, (enum mem_attr)0, &buffer);
+	if (ret) {
+		dev_err(mmu_ctx->device, "%s: img_mem_alloc failed (%d)\n",
+			__func__, ret);
+		goto free_page;
+	}
+
+	ret = _img_mem_map_km(buffer);
+	if (ret) {
+		dev_err(mmu_ctx->device, "%s: img_mem_map_km failed (%d)\n",
+			__func__, ret);
+		goto free_buffer;
+	}
+
+	page->addr_shift = mmu_ctx->mmu_config_addr_width - 32;
+	page->buffer = buffer;
+	page->page_cfg.cpu_virt_addr = (unsigned long)buffer->kptr;
+
+	heap = buffer->heap;
+	if (heap->ops && heap->ops->get_sg_table) {
+		void *sgt;
+
+		ret = heap->ops->get_sg_table(heap, buffer, &sgt);
+		if (ret) {
+			dev_err(mmu_ctx->device,
+				"%s: heap %d buffer %d no sg_table!\n",
+				__func__, heap->id, buffer->id);
+			ret = -EINVAL;
+			goto free_buffer;
+		}
+		page->page_cfg.phys_addr = sg_phys(img_mmu_get_sgl(sgt));
+	} else {
+		dev_err(mmu_ctx->device, "%s: heap %d buffer %d no get_sg!\n",
+			__func__, heap->id, buffer->id);
+		ret = -EINVAL;
+		goto free_buffer;
+	}
+
+	dev_dbg(mmu_ctx->device, "%s:%d virt addr %#lx\n", __func__, __LINE__,
+		page->page_cfg.cpu_virt_addr);
+	dev_dbg(mmu_ctx->device, "%s:%d phys addr %#llx\n", __func__, __LINE__,
+		page->page_cfg.phys_addr);
+	return &page->page_cfg;
+
+free_buffer:
+	_img_mem_free(buffer);
+	kfree(buffer);
+free_page:
+	kfree(page);
+	return NULL;
+}
+
+static void mmu_page_free(struct mmu_page_cfg *arg)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_page *page;
+
+	page = container_of(arg, struct mmu_page, page_cfg);
+
+	WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+	_img_mem_free(page->buffer);
+	kfree(page->buffer);
+	kfree(page);
+}
+
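+/*
+ * Write a single page table entry: the device address is translated
+ * through the heap's to_dev_addr hook (when present and the entry is
+ * valid), shifted down to the MMU entry granularity and combined with
+ * the entry flags.
+ */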
+static void mmu_page_write(struct mmu_page_cfg *page_cfg,
+			   unsigned int offset, unsigned long long addr,
+			   unsigned int flags)
+{
+	unsigned int *mem = (unsigned int *)page_cfg->cpu_virt_addr;
+	struct mmu_page *mmu_page;
+	struct heap *heap;
+
+	mmu_page = container_of(page_cfg, struct mmu_page, page_cfg);
+	heap = mmu_page->buffer->heap;
+
+	/* skip translation when flags are zero, assuming address is invalid */
+	if (flags && heap->to_dev_addr)
+		addr = heap->to_dev_addr(&heap->options, addr);
+	addr >>= mmu_page->addr_shift;
+
+	mem[offset] = addr | flags;
+}
+
+static void mmu_update_page(struct mmu_page_cfg *arg)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_page *page;
+
+	page = container_of(arg, struct mmu_page, page_cfg);
+
+	WARN_ON(!mutex_is_locked(mem_man->mutex));
+
+	_img_mem_sync_cpu_to_device(page->buffer);
+}
+
+int img_mmu_ctx_create(void *device, unsigned int mmu_config_addr_width,
+		       struct mem_ctx *mem_ctx, int heap_id,
+		       void (*callback_fn)(enum mmu_callback_type type,
+					   int buff_id, void *data),
+		       void *callback_data, struct mmu_ctx **mmu_ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	static struct mmu_info mmu_functions = {
+		.pfn_page_alloc = mmu_page_alloc,
+		.pfn_page_free = mmu_page_free,
+		.pfn_page_write = mmu_page_write,
+		.pfn_page_update = mmu_update_page,
+	};
+	struct mmu_ctx *ctx;
+	int ret;
+
+	if (mmu_config_addr_width < 32) {
+		dev_err(device,
+			"%s: invalid addr_width (%d) must be >= 32 !\n",
+			__func__, mmu_config_addr_width);
+		return -EINVAL;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->device = device;
+	ctx->mem_ctx = mem_ctx;
+	ctx->mmu_config_addr_width = mmu_config_addr_width;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	ctx->heap = idr_find(mem_man->heaps, heap_id);
+	if (!ctx->heap) {
+		dev_err(device, "%s: invalid heap_id (%d)!\n", __func__,
+			heap_id);
+		mutex_unlock(mem_man->mutex);
+		kfree(ctx);
+		return -EINVAL;
+	}
+
+	mmu_functions.alloc_ctx = ctx;
+	ctx->mmu_dir = mmu_create_directory(&mmu_functions);
+	if (IS_ERR_VALUE((unsigned long)ctx->mmu_dir)) {
+		ret = (long)(ctx->mmu_dir);
+		dev_err(device, "%s: directory create failed (%d)!\n", __func__,
+			ret);
+		ctx->mmu_dir = NULL;
+		mutex_unlock(mem_man->mutex);
+		kfree(ctx);
+		return ret;
+	}
+
+	list_add(&ctx->mem_ctx_entry, &mem_ctx->mmu_ctxs);
+	INIT_LIST_HEAD(&ctx->mappings);
+
+	ctx->callback_fn = callback_fn;
+	ctx->callback_data = callback_data;
+
+	*mmu_ctx = ctx;
+
+	mutex_unlock(mem_man->mutex);
+
+	return 0;
+}
+
+static void _img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	int ret;
+
+	while (!list_empty(&ctx->mappings)) {
+		struct mmu_ctx_mapping *map;
+
+		map = list_first_entry(&ctx->mappings,
+				       struct mmu_ctx_mapping, mmu_ctx_entry);
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(ctx->device,
+			 "%s: found mapped buffer %d (size %zu)\n",
+			 __func__, map->buffer->id, map->buffer->request_size);
+#endif
+
+		_img_mmu_unmap(map);
+
+		kfree(map);
+	}
+
+	ret = mmu_destroy_directory(ctx->mmu_dir);
+	if (ret)
+		dev_err(mem_man->dev, "mmu_destroy_directory failed (%d)!\n",
+			ret);
+	__list_del_entry(&ctx->mem_ctx_entry);
+}
+
+void img_mmu_ctx_destroy(struct mmu_ctx *ctx)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	_img_mmu_ctx_destroy(ctx);
+	mutex_unlock(mem_man->mutex);
+
+	kfree(ctx);
+}
+
+int img_mmu_map_sg(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		   int buff_id, void *sgt, unsigned int virt_addr,
+		   unsigned int map_flags)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx_mapping *mapping;
+	struct mmu_heap_alloc heap_alloc;
+	struct buffer *buffer;
+	int ret = 0;
+
+	dev_dbg(mmu_ctx->device, "%s sgt %p virt_addr %#x\n", __func__,
+		sgt, virt_addr);
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(mem_ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		ret = -EINVAL;
+		goto error;
+	}
+	dev_dbg(mmu_ctx->device, "%s buffer %d 0x%p size %zu virt_addr %#x\n",
+		__func__, buff_id, buffer, buffer->request_size, virt_addr);
+
+	heap_alloc.virt_addr = virt_addr;
+	heap_alloc.alloc_size = buffer->actual_size;
+
+	mapping->mmu_ctx = mmu_ctx;
+	mapping->buffer = buffer;
+	mapping->virt_addr = virt_addr;
+
+	if (sgt) {
+		struct sg_table *sgt_new = sgt;
+
+		mapping->map = mmu_directory_map_sg(mmu_ctx->mmu_dir, sgt_new->sgl,
+						    &heap_alloc, map_flags);
+		if (IS_ERR_VALUE((unsigned long)mapping->map)) {
+			ret = (long)(mapping->map);
+			mapping->map = NULL;
+		}
+	} else {
+		dev_err(mmu_ctx->device, "%s: buffer %d no get_sg!\n",
+			__func__, buffer->id);
+		ret = -EINVAL;
+		goto error;
+	}
+	if (ret) {
+		dev_err(mmu_ctx->device, "mmu_directory_map_sg failed (%d)!\n",
+			ret);
+		goto error;
+	}
+
+	list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
+	list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
+
+	if (mmu_ctx->callback_fn)
+		mmu_ctx->callback_fn(MMU_CALLBACK_MAP, buffer->id,
+				     mmu_ctx->callback_data);
+
+	mutex_unlock(mem_man->mutex);
+	return 0;
+
+error:
+	mutex_unlock(mem_man->mutex);
+	kfree(mapping);
+	return ret;
+}
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		int buff_id, unsigned int virt_addr, unsigned int map_flags)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx_mapping *mapping;
+	struct mmu_heap_alloc heap_alloc;
+	struct buffer *buffer;
+	struct heap *heap;
+	int ret;
+
+	dev_dbg(mmu_ctx->device, "%s buffer %d virt_addr %#x\n", __func__,
+		buff_id, virt_addr);
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+	buffer = idr_find(mem_ctx->buffers, buff_id);
+	if (!buffer) {
+		dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		ret = -EINVAL;
+		goto error;
+	}
+	dev_dbg(mmu_ctx->device, "%s buffer %d 0x%p size %zu virt_addr %#x\n",
+		__func__, buff_id, buffer, buffer->request_size, virt_addr);
+
+	heap_alloc.virt_addr = virt_addr;
+	heap_alloc.alloc_size = buffer->actual_size;
+
+	mapping->mmu_ctx = mmu_ctx;
+	mapping->buffer = buffer;
+	mapping->virt_addr = virt_addr;
+
+	heap = buffer->heap;
+	if (heap->ops && heap->ops->get_sg_table) {
+		void *sgt;
+
+		ret = heap->ops->get_sg_table(heap, buffer, &sgt);
+		if (ret) {
+			dev_err(mmu_ctx->device,
+				"%s: heap %d buffer %d no sg_table!\n",
+				__func__, heap->id, buffer->id);
+			goto error;
+		}
+
+		mapping->map = mmu_directory_map_sg(mmu_ctx->mmu_dir, img_mmu_get_sgl(sgt),
+						    &heap_alloc, map_flags);
+		if (IS_ERR_VALUE((unsigned long)mapping->map)) {
+			ret = (long)(mapping->map);
+			mapping->map = NULL;
+		}
+	} else {
+		dev_err(mmu_ctx->device, "%s: heap %d buffer %d no get_sg!\n",
+			__func__, heap->id, buffer->id);
+		ret = -EINVAL;
+		goto error;
+	}
+	if (ret) {
+		dev_err(mmu_ctx->device, "mmu_directory_map failed (%d)!\n",
+			ret);
+		goto error;
+	}
+
+	list_add(&mapping->mmu_ctx_entry, &mmu_ctx->mappings);
+	list_add(&mapping->buffer_entry, &mapping->buffer->mappings);
+
+	if (mmu_ctx->callback_fn)
+		mmu_ctx->callback_fn(MMU_CALLBACK_MAP, buffer->id,
+				     mmu_ctx->callback_data);
+
+	mutex_unlock(mem_man->mutex);
+	return 0;
+
+error:
+	mutex_unlock(mem_man->mutex);
+	kfree(mapping);
+	return ret;
+}
+
+static void _img_mmu_unmap(struct mmu_ctx_mapping *mapping)
+{
+	struct mmu_ctx *ctx = mapping->mmu_ctx;
+	int res;
+
+	dev_dbg(ctx->device, "%s:%d mapping %p buffer %d\n", __func__,
+		__LINE__, mapping, mapping->buffer->id);
+
+	res = mmu_directory_unmap(mapping->map);
+	if (res)
+		dev_warn(ctx->device, "mmu_directory_unmap failed (%d)!\n",
+			 res);
+
+	__list_del_entry(&mapping->mmu_ctx_entry);
+	__list_del_entry(&mapping->buffer_entry);
+
+	if (ctx->callback_fn)
+		ctx->callback_fn(MMU_CALLBACK_UNMAP, mapping->buffer->id,
+				 ctx->callback_data);
+}
+
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		  int buff_id)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_ctx_mapping *mapping;
+	struct list_head *lst;
+
+	dev_dbg(mmu_ctx->device, "%s:%d buffer %d\n", __func__, __LINE__,
+		buff_id);
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	mapping = NULL;
+	list_for_each(lst, &mmu_ctx->mappings) {
+		struct mmu_ctx_mapping *m;
+
+		m = list_entry(lst, struct mmu_ctx_mapping, mmu_ctx_entry);
+		if (m->buffer->id == buff_id) {
+			mapping = m;
+			break;
+		}
+	}
+
+	if (!mapping) {
+		dev_err(mmu_ctx->device, "%s: buffer id %d not found\n",
+			__func__, buff_id);
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	_img_mmu_unmap(mapping);
+
+	mutex_unlock(mem_man->mutex);
+	kfree(mapping);
+	return 0;
+}
+
+int img_mmu_get_ptd(const struct mmu_ctx *ctx, unsigned int *ptd)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct mmu_page_cfg *page_cfg;
+	unsigned long long addr;
+
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	page_cfg = mmu_directory_get_page(ctx->mmu_dir);
+	if (!page_cfg) {
+		mutex_unlock(mem_man->mutex);
+		return -EINVAL;
+	}
+
+	addr = page_cfg->phys_addr;
+	if (ctx->heap->to_dev_addr)
+		addr = ctx->heap->to_dev_addr(&ctx->heap->options, addr);
+
+	mutex_unlock(mem_man->mutex);
+
+	*ptd = (unsigned int)(addr >> VXD_MMU_SHIFT);
+
+	dev_dbg(ctx->device, "%s: addr %#llx ptd %#x\n", __func__,
+		page_cfg->phys_addr, *ptd);
+	return 0;
+}
+
+int img_mmu_get_pagetable_entry(const struct mmu_ctx *ctx, unsigned long dev_virt_addr)
+{
+	if (!ctx)
+		return 0xFFFFFF;
+
+	return mmu_directory_get_pagetable_entry(ctx->mmu_dir, dev_virt_addr);
+}
+
+/*
+ * Initialisation
+ */
+int img_mem_init(void *dev)
+{
+	struct mem_man *mem_man = &mem_man_data;
+
+	mem_man->dev = dev;
+	mem_man->heaps = kzalloc(sizeof(*mem_man->heaps), GFP_KERNEL);
+	if (!mem_man->heaps)
+		return -ENOMEM;
+	idr_init(mem_man->heaps);
+	INIT_LIST_HEAD(&mem_man->mem_ctxs);
+	mem_man->mutex = kzalloc(sizeof(*mem_man->mutex), GFP_KERNEL);
+	if (!mem_man->mutex) {
+		pr_err("Memory allocation failed for mutex\n");
+		idr_destroy(mem_man->heaps);
+		kfree(mem_man->heaps);
+		return -ENOMEM;
+	}
+	mutex_init(mem_man->mutex);
+
+	return 0;
+}
+
+void img_mem_exit(void)
+{
+	struct mem_man *mem_man = &mem_man_data;
+	struct heap *heap;
+	int heap_id;
+
+	/* keeps mutex checks (WARN_ON) happy, this will never actually wait */
+	mutex_lock_nested(mem_man->mutex, SUBCLASS_IMGMEM);
+
+	while (!list_empty(&mem_man->mem_ctxs)) {
+		struct mem_ctx *mc;
+
+		mc = list_first_entry(&mem_man->mem_ctxs,
+				      struct mem_ctx, mem_man_entry);
+		dev_warn(mem_man->dev, "%s derelict memory context %p!\n",
+			 __func__, mc);
+		_img_mem_destroy_ctx(mc);
+		kfree(mc);
+	}
+
+	heap_id = MIN_HEAP;
+	heap = idr_get_next(mem_man->heaps, &heap_id);
+	while (heap) {
+		dev_warn(mem_man->dev, "%s derelict heap %d!\n", __func__,
+			 heap_id);
+		_img_mem_del_heap(heap);
+		kfree(heap);
+		heap_id = MIN_HEAP;
+		heap = idr_get_next(mem_man->heaps, &heap_id);
+	}
+	idr_destroy(mem_man->heaps);
+	kfree(mem_man->heaps);
+
+	mutex_unlock(mem_man->mutex);
+
+	mutex_destroy(mem_man->mutex);
+	kfree(mem_man->mutex);
+	mem_man->mutex = NULL;
+}
diff --git a/drivers/staging/media/vxd/common/img_mem_man.h b/drivers/staging/media/vxd/common/img_mem_man.h
new file mode 100644
index 000000000000..1a10ad994d6e
--- /dev/null
+++ b/drivers/staging/media/vxd/common/img_mem_man.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC Memory Manager header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _IMG_DEC_MEM_MGR_H
+#define _IMG_DEC_MEM_MGR_H
+
+#include <linux/types.h>
+
+/* buffer ids (per memory context) */
+#define MEM_MAN_MIN_BUFFER 1
+#define MEM_MAN_MAX_BUFFER 16384
+
+enum mem_attr {
+	MEM_ATTR_CACHED        = 0x00000001,
+	MEM_ATTR_UNCACHED      = 0x00000002,
+	MEM_ATTR_WRITECOMBINE  = 0x00000004,
+	MEM_ATTR_SECURE        = 0x00000010,
+	MEM_ATTR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum mmu_callback_type {
+	MMU_CALLBACK_MAP = 1,
+	MMU_CALLBACK_UNMAP,
+	MMU_CALLBACK_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum heap_type {
+	MEM_HEAP_TYPE_UNIFIED = 1,
+	MEM_HEAP_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+union heap_options {
+	struct {
+		long long gfp_type; /* pool and flags for buffer allocations */
+	} unified;
+};
+
+/**
+ * struct heap_config - contains heap configuration structure
+ * @type: enumeration of heap_type
+ * @options: pool and flags for buffer allocations, eg GFP_KERNEL
+ * @to_dev_addr: function pointer for retrieving device addr
+ */
+struct heap_config {
+	enum heap_type type;
+	union heap_options options;
+	unsigned long long (*to_dev_addr)(union heap_options *opts, unsigned long long addr);
+};
+
+/*
+ * struct mmu_heap - description of a device virtual address heap
+ * @virt_addr_start: start of the device virtual address range
+ * @alloc_atom: atom allocation in bytes
+ * @size: total size of the heap in bytes
+ */
+struct mmu_heap {
+	unsigned long virt_addr_start;
+	unsigned long alloc_atom;
+	unsigned long size;
+};
+
+/*
+ * struct mem_ctx - the memory context
+ * @buffers: idr list of buffers
+ * @mmu_ctxs: contains linked lists of struct mmu_ctx
+ * @mem_man_entry: the entry list for dev_mem_main:mem_ctxs linked list
+ */
+struct mem_ctx {
+	struct idr *buffers;
+	struct list_head mmu_ctxs;
+	struct list_head mem_man_entry;
+};
+
+/*
+ * struct mmu_ctx_mapping - the mmu context mapping information
+ * @mmu_ctx: pointer to the mmu_ctx to which this mmu mapping information
+ *	     belongs
+ * @buffer: pointer to the buffer which this mmu_ctx_mapping is for
+ * @map: pointer to the mmu_map to which this mmu_ctx_mapping belongs
+ * @virt_addr: Virtual address
+ * @mmu_ctx_entry: the entry list for mmu_ctx:mapping linked list.
+ * @buffer_entry: the entry list for buffer:mappings linked list.
+ */
+struct mmu_ctx_mapping {
+	struct mmu_ctx *mmu_ctx;
+	struct buffer *buffer;
+	struct mmu_map *map;
+	unsigned int virt_addr;
+	struct list_head mmu_ctx_entry;
+	struct list_head buffer_entry;
+};
+
+/*
+ * struct mmu_ctx - the mmu context information - one per stream
+ * @device: pointer to the device
+ * @mmu_config_addr_width: the address width for the mmu config
+ * @mem_ctx: pointer to the mem_ctx to which this mmu_ctx belongs
+ * @heap: pointer to the struct heap to which this mmu_ctx belongs
+ * @mmu_dir: pointer to the mmu_directory this mmu_ctx belongs to
+ * @mappings: contains linked list of struct mmu_ctx_mapping
+ * @mem_ctx_entry: the entry list for mem_ctx:mmu_ctxs
+ * @callback_fn: pointer to function callback
+ * @callback_data: pointer to the callback data
+ */
+struct mmu_ctx {
+	void *device;
+	unsigned int mmu_config_addr_width;
+	struct mem_ctx *mem_ctx;
+	struct heap *heap;
+	struct mmu_directory *mmu_dir;
+	struct list_head mappings;
+	struct list_head mem_ctx_entry;
+	void (*callback_fn)(enum mmu_callback_type type, int buff_id,
+			    void *data);
+	void *callback_data;
+};
+
+/*
+ * struct buffer - the buffer information - one per allocation
+ * @id: buffer identification
+ * @request_size: requested size of the allocation
+ * @actual_size: size aligned up to PAGE_SIZE granularity
+ * @device: pointer to the device
+ * @mem_ctx: pointer to the struct mem_ctx to which this buffer belongs
+ * @heap: pointer to the struct heap from which this buffer was allocated
+ * @mappings: contains linked lists of struct mmu_ctx_mapping
+ * @kptr: pointer to virtual mapping for the buffer object into kernel address
+ *	  space
+ * @priv: pointer to priv data used for scatterlist table info
+ */
+struct buffer {
+	int id; /* Generated in <mem_ctx:buffers> */
+	unsigned long request_size;
+	unsigned long actual_size;
+	void *device;
+	struct mem_ctx *mem_ctx;
+	struct heap *heap;
+	struct list_head mappings; /* contains <struct mmu_ctx_mapping> */
+	void *kptr;
+	void *priv;
+};
+
+struct heap_ops {
+	int (*alloc)(void *device, struct heap *heap,
+		     unsigned long size, enum mem_attr attr,
+		     struct buffer *buffer);
+	void (*free)(struct heap *heap, struct buffer *buffer);
+	int (*map_km)(struct heap *heap, struct buffer *buffer);
+	int (*get_sg_table)(struct heap *heap, struct buffer *buffer,
+			    void **sg_table);
+	void (*sync_cpu_to_dev)(struct heap *heap, struct buffer *buffer);
+	void (*sync_dev_to_cpu)(struct heap *heap, struct buffer *buffer);
+	void (*destroy)(struct heap *heap);
+};
+
+struct heap {
+	int id; /* Generated in <mem_man:heaps> */
+	enum heap_type type;
+	struct heap_ops *ops;
+	union heap_options options;
+	unsigned long long (*to_dev_addr)(union heap_options *opts, unsigned long long addr);
+	void *priv;
+};
+
+int img_mem_init(void *dev);
+void img_mem_exit(void);
+
+int img_mem_create_ctx(struct mem_ctx **new_ctx);
+void img_mem_destroy_ctx(struct mem_ctx *ctx);
+
+int img_mem_import(void *device, struct mem_ctx *ctx,
+		   unsigned long size, enum mem_attr attr, int *buf_id);
+
+int img_mem_alloc(void *device, struct mem_ctx *ctx, int heap_id,
+		  unsigned long size, enum mem_attr attributes, int *buf_id);
+void img_mem_free(struct mem_ctx *ctx, int buff_id);
+
+void img_mem_free_bufid(struct mem_ctx *ctx, int buf_id);
+
+int img_mem_map_km(struct mem_ctx *ctx, int buf_id);
+void *img_mem_get_kptr(struct mem_ctx *ctx, int buff_id);
+
+int img_mem_sync_cpu_to_device(struct mem_ctx *ctx, int buf_id);
+int img_mem_sync_device_to_cpu(struct mem_ctx *ctx, int buf_id);
+
+int img_mmu_ctx_create(void *device, unsigned int mmu_config_addr_width,
+		       struct mem_ctx *mem_ctx, int heap_id,
+		       void (*callback_fn)(enum mmu_callback_type type,
+					   int buff_id, void *data),
+		       void *callback_data, struct mmu_ctx **mmu_ctx);
+void img_mmu_ctx_destroy(struct mmu_ctx *ctx);
+
+int img_mmu_map(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		int buff_id, unsigned int virt_addr, unsigned int map_flags);
+int img_mmu_map_sg(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		   int buff_id, void *sgt, unsigned int virt_addr,
+		   unsigned int map_flags);
+int img_mmu_unmap(struct mmu_ctx *mmu_ctx, struct mem_ctx *mem_ctx,
+		  int buff_id);
+
+int img_mmu_get_ptd(const struct mmu_ctx *ctx, unsigned int *ptd);
+
+int img_mmu_get_pagetable_entry(const struct mmu_ctx *ctx, unsigned long dev_virt_addr);
+
+int img_mem_add_heap(const struct heap_config *heap_cfg, int *heap_id);
+void img_mem_del_heap(int heap_id);
+
+/* Heap operation related function */
+int img_mem_unified_init(const struct heap_config *config,
+			 struct heap *heap);
+
+/* page and sg list related functions */
+void img_mmu_get_pages(void **page_args, void *sgt_args);
+unsigned int img_mmu_get_orig_nents(void *sgt_args);
+void img_mmu_set_sgt_nents(void *sgt_args, int ret);
+void img_mmu_set_sg_table(void **sg_table_args, void *buffer);
+unsigned int img_mmu_get_sgl_length(void *sgl_args);
+void *img_mmu_get_sgl(void *sgt_args);
+
+#endif /* _IMG_DEC_MEM_MGR_H */
diff --git a/drivers/staging/media/vxd/common/img_mem_unified.c b/drivers/staging/media/vxd/common/img_mem_unified.c
new file mode 100644
index 000000000000..30108b25d8b0
--- /dev/null
+++ b/drivers/staging/media/vxd/common/img_mem_unified.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC Memory Manager for unified memory
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_mem_man.h"
+
+void img_mmu_get_pages(void **page_args, void *sgt_args)
+{
+	struct page **pages = (struct page **)page_args;
+	struct sg_table *sgt = sgt_args;
+	struct scatterlist *sgl = sgt->sgl;
+	int i;
+
+	i = 0;
+	while (sgl) {
+		pages[i++] = sg_page(sgl);
+		sgl = sg_next(sgl);
+	}
+}
+
+unsigned int img_mmu_get_orig_nents(void *sgt_args)
+{
+	struct sg_table *sgt = sgt_args;
+
+	return sgt->orig_nents;
+}
+
+void img_mmu_set_sgt_nents(void *sgt_args, int ret)
+{
+	struct sg_table *sgt = sgt_args;
+
+	sgt->nents = ret;
+}
+
+void img_mmu_set_sg_table(void **sg_table_args, void *buffer)
+{
+	struct sg_table **sg_table = (struct sg_table **)sg_table_args;
+
+	*sg_table = buffer;
+}
+
+unsigned int img_mmu_get_sgl_length(void *sgl_args)
+{
+	struct scatterlist *sgl = (struct scatterlist *)sgl_args;
+
+	return sgl->length;
+}
+
+void *img_mmu_get_sgl(void *sgt_args)
+{
+	struct sg_table *sgt = sgt_args;
+
+	return sgt->sgl;
+}
+
+static int unified_alloc(void *device, struct heap *heap,
+			 unsigned long size, enum mem_attr attr,
+			 struct buffer *buffer)
+{
+	struct sg_table *sgt;
+	void *sgl;
+	int pages;
+	int ret;
+
+	dev_dbg(device, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt)
+		return -ENOMEM;
+
+	pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+	if (ret)
+		goto sg_alloc_table_failed;
+
+	sgl = img_mmu_get_sgl(sgt);
+	while (sgl) {
+		void *page;
+		unsigned long long dma_addr;
+
+		page = alloc_page(heap->options.unified.gfp_type);
+		if (!page) {
+			dev_err(device, "%s alloc_page failed!\n", __func__);
+			ret = -ENOMEM;
+			goto alloc_page_failed;
+		}
+
+		/*
+		 * dma_map_page() is probably going to fail if alloc flags are
+		 * GFP_HIGHMEM, since it is not mapped to CPU. Hopefully, this
+		 * will never happen because memory of this sort cannot be used
+		 * for DMA anyway. To check if this is the case, build with
+		 * debug, set trace_physical_pages=1 and check if page_address
+		 * printed above is NULL
+		 */
+		dma_addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device, dma_addr)) {
+			__free_page(page);
+			dev_err(device, "%s dma_map_page failed!\n", __func__);
+			ret = -EIO;
+			goto alloc_page_failed;
+		}
+		dma_unmap_page(device, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+		sg_set_page(sgl, page, PAGE_SIZE, 0);
+
+		sgl = sg_next(sgl);
+	}
+
+	buffer->priv = sgt;
+	return 0;
+
+alloc_page_failed:
+	sgl = img_mmu_get_sgl(sgt);
+	while (sgl) {
+		void *page = sg_page(sgl);
+
+		if (page)
+			__free_page(page);
+
+		sgl = sg_next(sgl);
+	}
+	sg_free_table(sgt);
+sg_alloc_table_failed:
+	kfree(sgt);
+	return ret;
+}
+
+static void unified_free(struct heap *heap, struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	void *sgt = buffer->priv;
+	void *sgl;
+
+	dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	if (buffer->kptr) {
+		dev_dbg(dev, "%s vunmap 0x%p\n", __func__, buffer->kptr);
+		dma_unmap_sg(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+			     DMA_FROM_DEVICE);
+		vunmap(buffer->kptr);
+	}
+
+	sgl = img_mmu_get_sgl(sgt);
+	while (sgl) {
+		__free_page(sg_page(sgl));
+		sgl = sg_next(sgl);
+	}
+	sg_free_table(sgt);
+	kfree(sgt);
+}
+
+static int unified_map_km(struct heap *heap, struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	void *sgt = buffer->priv;
+	void *sgl = img_mmu_get_sgl(sgt);
+	unsigned int num_pages = sg_nents(sgl);
+	unsigned int orig_nents = img_mmu_get_orig_nents(sgt);
+	void **pages;
+	int ret;
+	pgprot_t prot;
+
+	dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__, buffer->id, buffer);
+
+	if (buffer->kptr) {
+		dev_warn(dev, "%s called for already mapped buffer %d\n", __func__, buffer->id);
+		return 0;
+	}
+
+	pages = kmalloc_array(num_pages, sizeof(void *), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	img_mmu_get_pages(pages, sgt);
+
+	prot = PAGE_KERNEL;
+	prot = pgprot_writecombine(prot);
+	buffer->kptr = vmap((struct page **)pages, num_pages, VM_MAP, prot);
+	kfree(pages);
+	if (!buffer->kptr) {
+		dev_err(dev, "%s vmap failed!\n", __func__);
+		return -EFAULT;
+	}
+
+	ret = dma_map_sg(dev, sgl, orig_nents, DMA_FROM_DEVICE);
+	if (ret <= 0) {
+		dev_err(dev, "%s dma_map_sg failed!\n", __func__);
+		vunmap(buffer->kptr);
+		buffer->kptr = NULL;
+		return -EFAULT;
+	}
+	dev_dbg(dev, "%s:%d buffer %d orig_nents %d nents %d\n", __func__,
+		__LINE__, buffer->id, orig_nents, ret);
+
+	img_mmu_set_sgt_nents(sgt, ret);
+
+	dev_dbg(dev, "%s:%d buffer %d vmap to 0x%p\n", __func__, __LINE__,
+		buffer->id, buffer->kptr);
+
+	return 0;
+}
+
+static int unified_get_sg_table(struct heap *heap, struct buffer *buffer, void **sg_table)
+{
+	img_mmu_set_sg_table(sg_table, buffer->priv);
+	return 0;
+}
+
+static void unified_sync_cpu_to_dev(struct heap *heap, struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	void *sgt = buffer->priv;
+
+	if (!buffer->kptr)
+		return;
+
+	dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__, buffer->id, buffer);
+
+	dma_sync_sg_for_device(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+			       DMA_TO_DEVICE);
+}
+
+static void unified_sync_dev_to_cpu(struct heap *heap, struct buffer *buffer)
+{
+	void *dev = buffer->device;
+	void *sgt = buffer->priv;
+
+	if (!buffer->kptr)
+		return;
+
+	dev_dbg(dev, "%s:%d buffer %d (0x%p)\n", __func__, __LINE__,
+		buffer->id, buffer);
+
+	dma_sync_sg_for_cpu(dev, img_mmu_get_sgl(sgt), img_mmu_get_orig_nents(sgt),
+			    DMA_FROM_DEVICE);
+}
+
+static void unified_heap_destroy(struct heap *heap)
+{
+}
+
+static struct heap_ops unified_heap_ops = {
+	.alloc = unified_alloc,
+	.free = unified_free,
+	.map_km = unified_map_km,
+	.get_sg_table = unified_get_sg_table,
+	.sync_cpu_to_dev = unified_sync_cpu_to_dev,
+	.sync_dev_to_cpu = unified_sync_dev_to_cpu,
+	.destroy = unified_heap_destroy,
+};
+
+int img_mem_unified_init(const struct heap_config *heap_cfg,
+			 struct heap *heap)
+{
+	heap->ops = &unified_heap_ops;
+	return 0;
+}
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 04/30] v4l: vxd-dec: Add vxd helper library
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (2 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 03/30] v4l: vxd-dec: Create vxd_dec Mem Manager " sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 05/30] v4l: vxd-dec: Add IMG VXD Video Decoder mem to mem drive sidraya.bj
                   ` (27 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

The vxd helper provides the functionality for firmware blob preparation
and loading, power management (core reset, etc.), firmware messaging,
interrupt handling, managing the hardware status, and error handling.

The vxd helper also interacts with the memory manager helper to create
a context for each stream and associate it with the mmu context. The
common mappings are done during this creation for the firmware and
rendec buffers.
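
For illustration, the per-stream setup follows roughly this shape (a
sketch against the patch 03 memory manager API; the 40-bit address
width matches the MMU assumption in that patch, while dev, heap_id,
size, the map flags and the device virtual address are placeholders):

	static void map_cb(enum mmu_callback_type type, int buf_id, void *data)
	{
		/* e.g. react to buffers being mapped/unmapped for this stream */
	}

	struct mem_ctx *mem_ctx;
	struct mmu_ctx *mmu_ctx;
	unsigned int ptd;
	int buf_id;

	img_mem_create_ctx(&mem_ctx);
	img_mmu_ctx_create(dev, 40, mem_ctx, heap_id, map_cb, NULL, &mmu_ctx);

	/* allocate a buffer and map it at a fixed device virtual address */
	img_mem_alloc(dev, mem_ctx, heap_id, size, MEM_ATTR_UNCACHED, &buf_id);
	img_mmu_map(mmu_ctx, mem_ctx, buf_id, PVDEC_HEAP_UNTILED_START, 0);

	/* page table directory address, programmed into the core per stream */
	img_mmu_get_ptd(mmu_ctx, &ptd);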

Signed-off-by: Buddy Liong <buddy.liong@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |    4 +
 .../media/vxd/decoder/img_dec_common.h        |  278 +++
 drivers/staging/media/vxd/decoder/vxd_pvdec.c | 1745 +++++++++++++++++
 .../media/vxd/decoder/vxd_pvdec_priv.h        |  126 ++
 .../media/vxd/decoder/vxd_pvdec_regs.h        |  779 ++++++++
 5 files changed, 2932 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/img_dec_common.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 150272927839..0f8154b69a91 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19542,6 +19542,10 @@ F:	drivers/staging/media/vxd/common/img_mem_man.h
 F:	drivers/staging/media/vxd/common/img_mem_unified.c
 F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
+F:	drivers/staging/media/vxd/decoder/img_dec_common.h
+F:	drivers/staging/media/vxd/decoder/vxd_pvdec.c
+F:	drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
+F:	drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
 
 VIDEO I2C POLLING DRIVER
 M:	Matt Ranostay <matt.ranostay@konsulko.com>
diff --git a/drivers/staging/media/vxd/decoder/img_dec_common.h b/drivers/staging/media/vxd/decoder/img_dec_common.h
new file mode 100644
index 000000000000..7bb3bd6d6e78
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/img_dec_common.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC common header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _IMG_DEC_COMMON_H
+#define _IMG_DEC_COMMON_H
+
+#include <linux/types.h>
+
+#define VXD_MAX_PIPES 2
+#define MAX_DST_BUFFERS 32
+
+/* Helpers for parsing core properties. Based on HW registers layout. */
+#define VXD_GET_BITS(v, lb, rb, type) \
+	({                                \
+		type __rb = (rb);                                       \
+		(((v) >> (__rb)) & ((1 << ((lb) - __rb + 1)) - 1)); })
+#define VXD_GET_BIT(v, b) (((v) >> (b)) & 1)
+
+/* Get major core revision. */
+#define VXD_MAJ_REV(props) (VXD_GET_BITS((props).core_rev, 23, 16, unsigned int))
+/* Get minor core revision. */
+#define VXD_MIN_REV(props) (VXD_GET_BITS((props).core_rev, 15, 8, unsigned int))
+/* Get maintenance core revision. */
+#define VXD_MAINT_REV(props) (VXD_GET_BITS((props).core_rev, 7, 0, unsigned int))
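+/*
+ * For example, a core_rev value of 0x00030102 decodes as major
+ * revision 3 (bits 23:16), minor revision 1 (bits 15:8) and
+ * maintenance revision 2 (bits 7:0).
+ */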
+/* Get number of entropy pipes available (HEVC). */
+#define VXD_NUM_ENT_PIPES(props) ((props).pvdec_core_id & 0xF)
+/* Get number of pixel pipes available (other standards). */
+#define VXD_NUM_PIX_PIPES(props) (((props).pvdec_core_id & 0xF0) >> 4)
+/* Get number of bits used by external memory interface. */
+#define VXD_EXTRN_ADDR_WIDTH(props) ((((props).mmu_config0 & 0xF0) >> 4) + 32)
+
+/* Check whether specific standard is supported by the pixel pipe. */
+#define VXD_HAS_MPEG2(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 0)
+#define VXD_HAS_MPEG4(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 1)
+#define VXD_HAS_H264(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 2)
+#define VXD_HAS_VC1(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 3)
+#define VXD_HAS_WMV9(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 4)
+#define VXD_HAS_JPEG(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 5)
+#define VXD_HAS_MPEG4_DATA_PART(props, pipe) \
+	VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 6)
+#define VXD_HAS_AVS(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 7)
+#define VXD_HAS_REAL(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 8)
+#define VXD_HAS_VP6(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 9)
+#define VXD_HAS_VP8(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 10)
+#define VXD_HAS_SORENSON(props, pipe) \
+	VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 11)
+#define VXD_HAS_HEVC(props, pipe) VXD_GET_BIT(props.pixel_pipe_cfg[pipe], 22)
+
+/* Check whether specific feature is supported by the pixel pipe */
+
+/*
+ * Max picture size for HEVC still picture profile is 64k wide and/or 64k
+ * high.
+ */
+#define VXD_HAS_HEVC_64K_STILL(props, pipe) \
+	(VXD_GET_BIT((props).pixel_misc_cfg[pipe], 24))
+
+/* Pixel processing pipe index. */
+#define VXD_PIX_PIPE_ID(props, pipe) \
+	(VXD_GET_BITS((props).pixel_misc_cfg[pipe], 18, 16, unsigned int))
+
+/* Number of streams supported by the pixel pipe DMAC and shift register. */
+#define VXD_PIX_NUM_STRS(props, pipe) \
+	(VXD_GET_BITS((props).pixel_misc_cfg[pipe], 13, 12, unsigned int) + 1)
+
+/* Is scaling supported. */
+#define VXD_HAS_SCALING(props, pipe) \
+	(VXD_GET_BIT((props).pixel_misc_cfg[pipe], 9))
+
+/* Is rotation supported. */
+#define VXD_HAS_ROTATION(props, pipe) \
+	(VXD_GET_BIT((props).pixel_misc_cfg[pipe], 8))
+
+/* Are HEVC range extensions supported. */
+#define VXD_HAS_HEVC_REXT(props, pipe) \
+	(VXD_GET_BIT((props).pixel_misc_cfg[pipe], 7))
+
+/* Maximum bit depth supported by the pipe. */
+#define VXD_MAX_BIT_DEPTH(props, pipe) \
+	(VXD_GET_BITS((props).pixel_misc_cfg[pipe], 6, 4, unsigned int) + 8)
+
+/*
+ * Maximum chroma format supported by the pipe in HEVC mode.
+ * 0x1 - 4:2:0
+ * 0x2 - 4:2:2
+ * 0x3 - 4:4:4
+ */
+#define VXD_MAX_HEVC_CHROMA_FMT(props, pipe) \
+	(VXD_GET_BITS((props).pixel_misc_cfg[pipe], 3, 2, unsigned int))
+
+/*
+ * Maximum chroma format supported by the pipe in H264 mode.
+ * 0x1 - 4:2:0
+ * 0x2 - 4:2:2
+ * 0x3 - 4:4:4
+ */
+#define VXD_MAX_H264_CHROMA_FMT(props, pipe) \
+	(VXD_GET_BITS((props).pixel_misc_cfg[pipe], 1, 0, unsigned int))
+
+/*
+ * Maximum frame width and height supported in MSVDX pipeline.
+ */
+#define VXD_MAX_WIDTH_MSVDX(props) \
+	(2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 4, 0, unsigned int)))
+#define VXD_MAX_HEIGHT_MSVDX(props) \
+	(2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 12, 8, unsigned int)))
+
+/*
+ * Maximum frame width and height supported in PVDEC pipeline.
+ */
+#define VXD_MAX_WIDTH_PVDEC(props) \
+	(2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 20, 16, unsigned int)))
+#define VXD_MAX_HEIGHT_PVDEC(props) \
+	(2 << (VXD_GET_BITS((props).pixel_max_frame_cfg, 28, 24, unsigned int)))
+
+#define PVDEC_COMMS_RAM_OFFSET      0x00002000
+#define PVDEC_COMMS_RAM_SIZE        0x00001000
+#define PVDEC_ENTROPY_OFFSET        0x00003000
+#define PVDEC_ENTROPY_SIZE          0x1FF
+#define PVDEC_VEC_BE_OFFSET         0x00005000
+#define PVDEC_VEC_BE_SIZE           0x3FF
+#define PVDEC_VEC_BE_CODEC_OFFSET   0x00005400
+#define MSVDX_VEC_OFFSET            0x00006000
+#define MSVDX_VEC_SIZE              0x7FF
+#define MSVDX_CMD_OFFSET            0x00007000
+
+/*
+ * Virtual memory heap address ranges for tiled
+ * and non-tiled buffers. Addresses within each
+ * range should be assigned to the appropriate
+ * buffers by the UM driver and mapped into the
+ * device using the corresponding KM driver ioctl.
+ */
+#define PVDEC_HEAP_UNTILED_START    0x00400000ul
+#define PVDEC_HEAP_UNTILED_SIZE     0x3FC00000ul
+#define PVDEC_HEAP_TILE512_START    0x40000000ul
+#define PVDEC_HEAP_TILE512_SIZE     0x10000000ul
+#define PVDEC_HEAP_TILE1024_START   0x50000000ul
+#define PVDEC_HEAP_TILE1024_SIZE    0x20000000ul
+#define PVDEC_HEAP_TILE2048_START   0x70000000ul
+#define PVDEC_HEAP_TILE2048_SIZE    0x30000000ul
+#define PVDEC_HEAP_TILE4096_START   0xA0000000ul
+#define PVDEC_HEAP_TILE4096_SIZE    0x30000000ul
+#define PVDEC_HEAP_BITSTREAM_START  0xD2000000ul
+#define PVDEC_HEAP_BITSTREAM_SIZE   0x0A000000ul
+#define PVDEC_HEAP_STREAM_START     0xE4000000ul
+#define PVDEC_HEAP_STREAM_SIZE      0x1C000000ul
+
+/*
+ * Max size of the message payload, in bytes. There are 7 bits used to encode
+ * the message size in the firmware interface.
+ */
+#define VXD_MAX_PAYLOAD_SIZE (127 * sizeof(unsigned int))
+/* Max size of the input message in bytes. */
+#define VXD_MAX_INPUT_SIZE (VXD_MAX_PAYLOAD_SIZE + sizeof(struct vxd_fw_msg))
+/*
+ * Min size of the input message. Two words needed for message header and
+ * stream PTD
+ */
+#define VXD_MIN_INPUT_SIZE 2
+/*
+ * Offset of the stream PTD within message. This word has to be left null in
+ * submitted message, driver will fill it in with an appropriate value.
+ */
+#define VXD_PTD_MSG_OFFSET 1
+
+/* Read flags */
+#define VXD_FW_MSG_RD_FLAGS_MASK 0xffff
+/* Driver watchdog interrupted processing of the message. */
+#define VXD_FW_MSG_FLAG_DWR 0x1
+/* VXD MMU fault occurred when the message was processed. */
+#define VXD_FW_MSG_FLAG_MMU_FAULT 0x2
+/* Invalid input message, e.g. the message was too large. */
+#define VXD_FW_MSG_FLAG_INV 0x4
+/* I/O error occurred when the message was processed. */
+#define VXD_FW_MSG_FLAG_DEV_ERR 0x8
+/*
+ * Driver error occurred when the message was processed, e.g. failed to
+ * allocate memory.
+ */
+#define VXD_FW_MSG_FLAG_DRV_ERR 0x10
+/*
+ * Item was canceled, without being fully processed
+ * i.e. corresponding stream was destroyed.
+ */
+#define VXD_FW_MSG_FLAG_CANCELED 0x20
+/* Firmware internal error occurred when the message was processed */
+#define VXD_FW_MSG_FLAG_FATAL 0x40
+
+/* Write flags */
+#define VXD_FW_MSG_WR_FLAGS_MASK 0xffff0000
+/* Indicates that message shall be dropped after sending it to the firmware. */
+#define VXD_FW_MSG_FLAG_DROP 0x10000
+/*
+ * Indicates that message shall be exclusively handled by
+ * the firmware/hardware. Any other pending messages are
+ * blocked until such message is handled.
+ */
+#define VXD_FW_MSG_FLAG_EXCL 0x20000
+
+#define VXD_MSG_SIZE(msg) (sizeof(struct vxd_fw_msg) + ((msg).payload_size))
+
+/* Header included at the beginning of firmware binary */
+struct vxd_fw_hdr {
+	unsigned int core_size;
+	unsigned int blob_size;
+	unsigned int firmware_id;
+	unsigned int timestamp;
+};
+
+/*
+ * struct vxd_dev_fw - Core component will allocate a buffer for firmware.
+ *                     This structure holds the information about the firmware
+ *                     binary.
+ * @buf_id: The buffer id allocation
+ * @hdr: firmware header information
+ * @fw_size: The size of the fw. Set after successful firmware request.
+ * @ready: set when the firmware has been prepared and is ready for use
+ */
+struct vxd_dev_fw {
+	int buf_id;
+	struct vxd_fw_hdr *hdr;
+	unsigned int fw_size;
+	unsigned char ready;
+};
+
+/*
+ * struct vxd_core_props - contains HW core properties
+ * @core_rev: Core revision based on register CR_PVDEC_CORE_REV
+ * @pvdec_core_id: PVDEC Core id based on register CR_PVDEC_CORE_ID
+ * @mmu_config0: MMU configuration 0 based on register MMU_CONFIG0
+ * @mmu_config1: MMU configuration 1 based on register MMU_CONFIG1
+ * @mtx_ram_size: size of the MTX RAM based on register CR_PROC_DEBUG
+ * @pixel_max_frame_cfg: indicates the max frame height and width for
+ *                       PVDEC pipeline and MSVDX pipeline based on register
+ *                       MAX_FRAME_CONFIG
+ * @pixel_pipe_cfg: pipe configuration which codecs are supported in a
+ *                  Pixel Processing Pipe, based on register
+ *                  PIXEL_PIPE_CONFIG
+ * @pixel_misc_cfg: Additional pipe configuration eg. supported scaling
+ *                  or rotation, based on register PIXEL_MISC_CONFIG
+ * @dbg_fifo_size: contains the depth of the Debug FIFO, based on
+ *                 register CR_PROC_DEBUG_FIFO_SIZE
+ */
+struct vxd_core_props {
+	unsigned int core_rev;
+	unsigned int pvdec_core_id;
+	unsigned int mmu_config0;
+	unsigned int mmu_config1;
+	unsigned int mtx_ram_size;
+	unsigned int pixel_max_frame_cfg;
+	unsigned int pixel_pipe_cfg[VXD_MAX_PIPES];
+	unsigned int pixel_misc_cfg[VXD_MAX_PIPES];
+	unsigned int dbg_fifo_size;
+};
+
+struct vxd_alloc_data {
+	unsigned int heap_id;       /* [IN] Heap ID of allocator                */
+	unsigned int size;          /* [IN] Size of device memory (in bytes)    */
+	unsigned int attributes;    /* [IN] Attributes of buffer */
+	unsigned int buf_id;        /* [OUT] Generated buffer ID                */
+};
+
+struct vxd_free_data {
+	unsigned int buf_id;        /* [IN] ID of device buffer to free */
+};
+#endif /* _IMG_DEC_COMMON_H */
diff --git a/drivers/staging/media/vxd/decoder/vxd_pvdec.c b/drivers/staging/media/vxd/decoder/vxd_pvdec.c
new file mode 100644
index 000000000000..c2b59c3dd164
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_pvdec.c
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC PVDEC function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/time64.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_dec_common.h"
+#include "img_pvdec_test_regs.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "vxd_pvdec_priv.h"
+#include "vxd_pvdec_regs.h"
+
+#ifdef PVDEC_SINGLETHREADED_IO
+static DEFINE_SPINLOCK(pvdec_irq_lock);
+static ulong pvdec_irq_flags;
+#endif
+
+static const ulong vxd_plat_poll_udelay = 100;
+
+/* Returns the remainder of *n / base and stores the quotient back in *n */
+static inline unsigned int do_divide(unsigned long long *n, unsigned int base)
+{
+	unsigned int remainder = *n % base;
+	*n = *n / base;
+	return remainder;
+}
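+
+/*
+ * Example (hypothetical values): with *n == 1000003 and base == 1000,
+ * do_divide() returns 3 and leaves *n == 1000 - the remainder comes back
+ * and the quotient is written through the pointer, mirroring the kernel's
+ * do_div() contract.
+ */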
+
+/*
+ * Reads PROC_DEBUG register and provides number of MTX RAM banks
+ * and their size
+ */
+static int pvdec_get_mtx_ram_info(void __iomem *reg_base, int *bank_cnt,
+				  unsigned long *bank_size,
+				  unsigned long *last_bank_size)
+{
+	unsigned int ram_bank_count, reg;
+
+	reg = VXD_RD_REG(reg_base, PVDEC_CORE, PROC_DEBUG);
+	ram_bank_count = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PROC_DEBUG, MTX_RAM_BANKS);
+	if (!ram_bank_count)
+		return -EIO;
+
+	if (bank_cnt)
+		*bank_cnt = ram_bank_count;
+
+	if (bank_size) {
+		unsigned int ram_bank_size = VXD_RD_REG_FIELD(reg, PVDEC_CORE,
+				PROC_DEBUG, MTX_RAM_BANK_SIZE);
+		*bank_size = 1 << (ram_bank_size + 2);
+	}
+
+	if (last_bank_size) {
+		unsigned int last_bank = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PROC_DEBUG,
+				MTX_LAST_RAM_BANK_SIZE);
+		unsigned char new_representation = VXD_RD_REG_FIELD(reg,
+				PVDEC_CORE, PROC_DEBUG, MTX_RAM_NEW_REPRESENTATION);
+		if (new_representation) {
+			*last_bank_size = 1024 * last_bank;
+		} else {
+			*last_bank_size = 1 << (last_bank + 2);
+			if (bank_cnt && last_bank == 13 && *bank_cnt == 4) {
+				/*
+				 * VXD hardware ambiguity:
+				 * old cores confuse 120k and 128k
+				 * So assume worst case.
+				 */
+				*last_bank_size -= 0x2000;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Provides size of MTX RAM in bytes */
+static int pvdec_get_mtx_ram_size(void __iomem *reg_base, unsigned int *ram_size)
+{
+	int bank_cnt, ret;
+	unsigned long bank_size, last_bank_size;
+
+	ret = pvdec_get_mtx_ram_info(reg_base, &bank_cnt, &bank_size, &last_bank_size);
+	if (ret)
+		return ret;
+
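+	/*
+	 * All banks but the last share one size, e.g. (hypothetical
+	 * register values) 4 banks of 32 KiB with a 24 KiB last bank
+	 * give 3 * 32 KiB + 24 KiB = 120 KiB of MTX RAM.
+	 */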
+	*ram_size = (bank_cnt - 1) * bank_size + last_bank_size;
+
+	return 0;
+}
+
+/* Poll for single register-based transfer to/from MTX to complete */
+static int pvdec_wait_mtx_reg_access(void __iomem *reg_base, unsigned int *mtx_fault)
+{
+	unsigned int pvdec_timeout = PVDEC_TIMEOUT_COUNTER, reg;
+
+	do {
+		/* Check MTX is OK */
+		reg = VXD_RD_REG(reg_base, MTX_CORE, MTX_FAULT0);
+		if (reg != 0) {
+			*mtx_fault = reg;
+			return -EIO;
+		}
+
+		pvdec_timeout--;
+		reg = VXD_RD_REG(reg_base, MTX_CORE, MTX_REG_READ_WRITE_REQUEST);
+	} while ((VXD_RD_REG_FIELD(reg, MTX_CORE,
+		MTX_REG_READ_WRITE_REQUEST,
+		MTX_DREADY) == 0) &&
+		(pvdec_timeout != 0));
+
+	if (pvdec_timeout == 0)
+		return -EIO;
+
+	return 0;
+}
+
+static void pvdec_mtx_status_dump(void __iomem *reg_base, unsigned int *status)
+{
+	unsigned int reg;
+
+	pr_debug("%s: *** dumping status ***\n", __func__);
+
+#define READ_MTX_REG(_NAME_) \
+	do { \
+		unsigned int val; \
+		VXD_WR_REG(reg_base, MTX_CORE, \
+			MTX_REG_READ_WRITE_REQUEST, reg); \
+		if (pvdec_wait_mtx_reg_access(reg_base, &reg)) { \
+			pr_debug("%s: MTX REG RD fault: 0x%08x\n", __func__, reg); \
+			break; \
+		} \
+		val = VXD_RD_REG(reg_base, MTX_CORE, MTX_REG_READ_WRITE_DATA); \
+		if (status) \
+			*status++ = val; \
+		pr_debug("%s: " _NAME_ ": 0x%08x\n", __func__, val); \
+	} while (0)
+
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC or PCX */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 5);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 0);
+	READ_MTX_REG("MTX PC");
+
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PC or PCX */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 5);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* PCX */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 1);
+	READ_MTX_REG("MTX PCX");
+
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* A0StP */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 3);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE,
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 0);
+	READ_MTX_REG("MTX A0STP");
+
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* Read */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_RNW, 1);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, /* A0FrP */
+			       MTX_REG_READ_WRITE_REQUEST, MTX_USPECIFIER, 3);
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_REG_READ_WRITE_REQUEST, MTX_RSPECIFIER, 1);
+	READ_MTX_REG("MTX A0FRP");
+#undef READ_MTX_REG
+
+	pr_debug("%s: *** status dump done ***\n", __func__);
+}
+
+static void pvdec_prep_fw_upload(const void *dev,
+				 void __iomem *reg_base,
+				 struct vxd_ena_params *ena_params,
+				 unsigned char dma_channel)
+{
+	unsigned int fw_vxd_virt_addr = ena_params->fw_buf_virt_addr;
+	unsigned int vxd_ptd_addr = ena_params->ptd;
+	unsigned int reg = 0;
+	int i;
+	unsigned int flags = PVDEC_FWFLAG_FORCE_FS_FLOW |
+		PVDEC_FWFLAG_DISABLE_GENC_FLUSHING |
+		PVDEC_FWFLAG_DISABLE_AUTONOMOUS_RESET |
+		PVDEC_FWFLAG_DISABLE_IDLE_GPIO |
+		PVDEC_FWFLAG_ENABLE_ERROR_CONCEALMENT;
+
+	if (ena_params->secure)
+		flags |= PVDEC_FWFLAG_BIG_TO_HOST_BUFFER;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: fw_virt: 0x%x, ptd: 0x%x, dma ch: %u, flags: 0x%x\n",
+		__func__, fw_vxd_virt_addr, vxd_ptd_addr, dma_channel, flags);
+#endif
+
+	/* Reset MTX */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SOFT_RESET, MTX_RESET, 1);
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SOFT_RESET, reg);
+	/*
+	 * NOTE: The MTX reset bit is WRITE ONLY, so we cannot check
+	 * whether the reset procedure has finished; BEWARE of placing
+	 * any MTX_CORE* access just after this line.
+	 */
+
+	/* Clear COMMS RAM header */
+	for (i = 0; i < PVDEC_FW_COMMS_HDR_SIZE; i++)
+		VXD_WR_REG_ABS(reg_base, VLR_OFFSET + i * sizeof(unsigned int), 0);
+
+	VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET, flags);
+	/* Do not wait for debug FIFO flag - set it only when requested */
+	VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_SIGNATURE_OFFSET,
+		       !ena_params->wait_dbg_fifo);
+
+	/*
+	 * Clear the bypass bits and enable extended addressing in MMU.
+	 * Firmware depends on this configuration, so we have to set it,
+	 * even if firmware is being uploaded via registers.
+	 */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, UPPER_ADDR_FIXED, 0);
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, MMU_ENA_EXT_ADDR, 1);
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, MMU_BYPASS, 0);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_ADDRESS_CONTROL, reg);
+
+	/*
+	 * Buffer device virtual address.
+	 * This is an address of a firmware blob, firmware reads this base
+	 * address from DMAC_SETUP register and uses to load the modules, so it
+	 * has to be set even when uploading the FW via registers.
+	 */
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_SETUP, fw_vxd_virt_addr, dma_channel);
+
+	/*
+	 * Set base address of PTD. Same as before, has to be configured even
+	 * when uploading the firmware via regs, FW uses it to execute DMA
+	 * before switching to stream MMU context.
+	 */
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_DIR_BASE_ADDR, vxd_ptd_addr);
+
+	/* Configure MMU bank index - Use bank 0 */
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_BANK_INDEX, 0);
+
+	/* Set the MTX timer divider register */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_EN, 1);
+	/*
+	 * Setting max freq - divide by 1 for better measurement accuracy
+	 * during fw upload stage
+	 */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_DIV, 0);
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, reg);
+}
+
+static int pvdec_check_fw_sig(void __iomem *reg_base)
+{
+	unsigned int fw_sig = VXD_RD_REG_ABS(reg_base, VLR_OFFSET +
+			PVDEC_FW_SIGNATURE_OFFSET);
+
+	if (fw_sig != PVDEC_FW_READY_SIG)
+		return -EIO;
+
+	return 0;
+}
+
+static void pvdec_kick_mtx(void __iomem *reg_base)
+{
+	unsigned int reg = 0;
+
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_KICKI, MTX_KICKI, 1);
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_KICKI, reg);
+}
+
+static int pvdec_write_vlr(void __iomem *reg_base, const unsigned int *buf,
+			   unsigned long size_dwrds, int off_dwrds)
+{
+	unsigned int i;
+
+	if (((off_dwrds + size_dwrds) * sizeof(unsigned int)) > VLR_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < size_dwrds; i++) {
+		int off = (off_dwrds + i) * sizeof(unsigned int);
+
+		VXD_WR_REG_ABS(reg_base, (VLR_OFFSET + off), *buf);
+		buf++;
+	}
+
+	return 0;
+}
+
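+/*
+ * Poll for the firmware boot signature: first ~25 fast polls of roughly
+ * 100 us each (about 2.5 ms in total) to catch quick boots, then up to
+ * poll_params->msleep_cycles polls spaced 100 ms apart before giving up.
+ */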
+static int pvdec_poll_fw_boot(void __iomem *reg_base, struct vxd_boot_poll_params *poll_params)
+{
+	unsigned int i;
+
+	for (i = 0; i < 25; i++) {
+		if (!pvdec_check_fw_sig(reg_base))
+			return 0;
+		usleep_range(100, 110);
+	}
+	for (i = 0; i < poll_params->msleep_cycles; i++) {
+		if (!pvdec_check_fw_sig(reg_base))
+			return 0;
+		msleep(100);
+	}
+	return -EIO;
+}
+
+static int pvdec_read_vlr(void __iomem *reg_base, unsigned int *buf,
+			  unsigned long size_dwrds, int off_dwrds)
+{
+	unsigned int i;
+
+	if (((off_dwrds + size_dwrds) * sizeof(unsigned int)) > VLR_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < size_dwrds; i++) {
+		int off = (off_dwrds + i) * sizeof(unsigned int);
+		*buf++ = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET + off));
+	}
+
+	return 0;
+}
+
+/* Get configuration of a ring buffer used to send messages to the MTX */
+static int pvdec_get_to_mtx_cfg(void __iomem *reg_base, unsigned long *size, int *off,
+				unsigned int *wr_idx, unsigned int *rd_idx)
+{
+	unsigned int to_mtx_cfg;
+	int to_mtx_off, ret;
+
+	ret = pvdec_check_fw_sig(reg_base);
+	if (ret)
+		return ret;
+
+	to_mtx_cfg = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_BUF_CONF_OFFSET);
+
+	*size = PVDEC_FW_COM_BUF_SIZE(to_mtx_cfg);
+	to_mtx_off = PVDEC_FW_COM_BUF_OFF(to_mtx_cfg);
+
+	if (to_mtx_off % 4)
+		return -EIO;
+
+	to_mtx_off /= sizeof(unsigned int);
+	*off = to_mtx_off;
+
+	*wr_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET);
+	*rd_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_RD_IDX_OFFSET);
+
+	if ((*rd_idx >= *size) || (*wr_idx >= *size))
+		return -EIO;
+
+	return 0;
+}
+
+/* Submit a padding message to the host->MTX ring buffer */
+static int pvdec_send_pad_msg(void __iomem *reg_base)
+{
+	int ret, pad_size, to_mtx_off; /* offset in dwords */
+	unsigned int wr_idx, rd_idx; /* indices in dwords */
+	unsigned long pad_msg_size = 1, to_mtx_size; /* size in dwords */
+	const unsigned long max_msg_size = VXD_MAX_PAYLOAD_SIZE / sizeof(unsigned int);
+	unsigned int pad_msg;
+
+	ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+	if (ret)
+		return ret;
+
+	pad_size = to_mtx_size - wr_idx; /* size in dwords */
+
+	if (pad_size <= 0) {
+		VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, 0);
+		return 0;
+	}
+
+	while (pad_size > 0) {
+		int cur_pad_size = pad_size > max_msg_size ?
+			max_msg_size : pad_size;
+
+		pad_msg = 0;
+		pad_msg = VXD_WR_REG_FIELD(pad_msg, PVDEC_FW, DEVA_GENMSG, MSG_SIZE, cur_pad_size);
+		pad_msg = VXD_WR_REG_FIELD(pad_msg, PVDEC_FW, DEVA_GENMSG,
+					   MSG_TYPE, PVDEC_FW_MSG_TYPE_PADDING);
+
+		ret = pvdec_write_vlr(reg_base, &pad_msg, pad_msg_size, to_mtx_off + wr_idx);
+		if (ret)
+			return ret;
+
+		wr_idx += cur_pad_size;
+
+		VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+		pad_size -= cur_pad_size;
+
+		pvdec_kick_mtx(reg_base);
+	}
+
+	wr_idx = 0;
+	VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+	return 0;
+}
+
+/*
+ * Check if there is enough space in comms RAM to submit a message
+ * <msg_size> dwords long. Submit a padding message if necessary and
+ * requested.
+ *
+ * Returns 0 if there is space for the message.
+ * Returns -EINVAL when the message is too big or empty.
+ * Returns -EIO when there was a problem accessing the HW.
+ * Returns -EBUSY when there is not enough space.
+ */
+static int pvdec_check_comms_space(void __iomem *reg_base, unsigned long msg_size,
+				   unsigned char send_padding)
+{
+	int ret, to_mtx_off; /* offset in dwords */
+	unsigned int wr_idx, rd_idx; /* indices in dwords */
+	unsigned long to_mtx_size; /* size in dwords */
+
+	ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+	if (ret)
+		return ret;
+
+	/* Enormous or empty message, won't fit */
+	if (msg_size >= to_mtx_size || !msg_size)
+		return -EINVAL;
+
+	/* Buffer does not wrap */
+	if (wr_idx >= rd_idx) {
+		/* Is there enough space to put the message? */
+		if (wr_idx + msg_size < to_mtx_size)
+			return 0;
+
+		if (!send_padding)
+			return -EBUSY;
+
+		/* Check if it's ok to send a padding message */
+		if (rd_idx == 0)
+			return -EBUSY;
+
+		/* Send a padding message */
+		ret = pvdec_send_pad_msg(reg_base);
+		if (ret)
+			return ret;
+
+		/*
+		 * And check if there's enough space at the beginning
+		 * of a buffer
+		 */
+		if (msg_size >= rd_idx)
+			return -EBUSY; /* Not enough space at the beginning */
+
+	} else { /* Buffer wraps */
+		if (wr_idx + msg_size >= rd_idx)
+			return -EBUSY; /* Not enough space! */
+	}
+
+	return 0;
+}
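+
+/*
+ * Illustration of the space check above (hypothetical sizes): with a
+ * 64-dword buffer, wr_idx == 60 and rd_idx == 16, a 10-dword message
+ * does not fit at the end (60 + 10 >= 64). A 4-dword padding message is
+ * sent so that wr_idx wraps to 0, and since 10 < rd_idx the message now
+ * fits at the start of the buffer.
+ */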
+
+/* Get configuration of a ring buffer used to receive messages from the MTX */
+static int pvdec_get_to_host_cfg(void __iomem *reg_base, unsigned long *size, int *off,
+				 unsigned int *wr_idx, unsigned int *rd_idx)
+{
+	unsigned int to_host_cfg;
+	int to_host_off, ret;
+
+	ret = pvdec_check_fw_sig(reg_base);
+	if (ret)
+		return ret;
+
+	to_host_cfg = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_BUF_CONF_OFFSET);
+
+	*size = PVDEC_FW_COM_BUF_SIZE(to_host_cfg);
+	to_host_off = PVDEC_FW_COM_BUF_OFF(to_host_cfg);
+
+	if (to_host_off % 4)
+		return -EIO;
+
+	to_host_off /= sizeof(unsigned int);
+	*off = to_host_off;
+
+	*wr_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_WR_IDX_OFFSET);
+	*rd_idx = VXD_RD_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_TO_HOST_RD_IDX_OFFSET);
+
+	if ((*rd_idx >= *size) || (*wr_idx >= *size))
+		return -EIO;
+
+	return 0;
+}
+
+static void pvdec_select_pipe(void __iomem *reg_base, unsigned char pipe)
+{
+	unsigned int reg = 0;
+
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT, PIPE_SEL, pipe);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT, reg);
+}
+
+static void pvdec_pre_boot_setup(const void *dev,
+				 void __iomem *reg_base,
+				 struct vxd_ena_params *ena_params)
+{
+	/* Memory staller pre boot settings */
+	if (ena_params->mem_staller.data) {
+		unsigned char size = ena_params->mem_staller.size;
+
+		if (size == PVDEC_CORE_MEMSTALLER_ELEMENTS) {
+			unsigned int *data = ena_params->mem_staller.data;
+
+#ifdef DEBUG_DECODER_DRIVER
+			dev_dbg(dev, "%s: Setting up memory staller\n", __func__);
+#endif
+			/*
+			 * Data structure represents PVDEC_TEST memory staller
+			 * registers according to section 5.25 of the TRM.
+			 */
+			VXD_WR_REG(reg_base, PVDEC_TEST, MEM_READ_LATENCY, data[0]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, MEM_WRITE_RESPONSE_LATENCY, data[1]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, MEM_CTRL, data[2]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_CMD_CONFIG, data[3]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_WDATA_CONFIG, data[4]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_WRESP_CONFIG, data[5]);
+			VXD_WR_REG(reg_base, PVDEC_TEST, RAND_STL_MEM_RDATA_CONFIG, data[6]);
+		} else {
+			dev_warn(dev, "%s: Wrong layout of mem staller config (%u)!\n",
+				 __func__, size);
+		}
+	}
+}
+
+static void pvdec_post_boot_setup(const void *dev,
+				  void __iomem *reg_base,
+				  unsigned int freq_khz)
+{
+	int reg;
+
+	/*
+	 * Configure VXD MMU to use video tiles (256x16) and unique
+	 * strides per context as default. There is currently no
+	 * override mechanism.
+	 */
+	reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0);
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0,
+			       MMU_TILING_SCHEME, 0);
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0,
+			       USE_TILE_STRIDE_PER_CTX, 1);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL0, reg);
+
+	/*
+	 * Setup VXD MMU with the tile heap device virtual address
+	 * ranges.
+	 */
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+		       PVDEC_HEAP_TILE512_START, 0);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+		       PVDEC_HEAP_TILE512_START + PVDEC_HEAP_TILE512_SIZE - 1, 0);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+		       PVDEC_HEAP_TILE1024_START, 1);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+		       PVDEC_HEAP_TILE1024_START + PVDEC_HEAP_TILE1024_SIZE - 1, 1);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+		       PVDEC_HEAP_TILE2048_START, 2);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+		       PVDEC_HEAP_TILE2048_START + PVDEC_HEAP_TILE2048_SIZE - 1, 2);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MIN_ADDR,
+		       PVDEC_HEAP_TILE4096_START, 3);
+	VXD_WR_RPT_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_TILE_MAX_ADDR,
+		       PVDEC_HEAP_TILE4096_START + PVDEC_HEAP_TILE4096_SIZE - 1, 3);
+
+	/* Disable timer */
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, 0);
+
+	reg = 0;
+	if (freq_khz)
+		reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_DIV,
+				       PVDEC_CALC_TIMER_DIV(freq_khz / 1000));
+	else
+		reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV,
+				       TIMER_DIV, PVDEC_CLK_MHZ_DEFAULT - 1);
+
+	/* Enable the MTX timer with final settings */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_TIMERDIV, TIMER_EN, 1);
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_TIMERDIV, reg);
+}
+
+static void pvdec_clock_measure(void __iomem *reg_base,
+				struct timespec64 *start_time,
+				unsigned int *start_ticks)
+{
+	local_irq_disable();
+	ktime_get_real_ts64(start_time);
+	*start_ticks = VXD_RD_REG(reg_base, MTX_CORE, MTX_SYSC_TXTIMER);
+	local_irq_enable();
+}
+
+static int pvdec_clock_calculate(const void *dev,
+				 void __iomem *reg_base,
+				 struct timespec64 *start_time,
+				 unsigned int start_ticks,
+				 unsigned int *freq_khz)
+{
+	struct timespec64 end_time, dif_time;
+	long long span_nsec = 0;
+	unsigned int stop_ticks, tot_ticks;
+
+	local_irq_disable();
+	ktime_get_real_ts64(&end_time);
+
+	stop_ticks = VXD_RD_REG(reg_base, MTX_CORE, MTX_SYSC_TXTIMER);
+	local_irq_enable();
+
+	dif_time = timespec64_sub(end_time, *start_time);
+
+	span_nsec = timespec64_to_ns(&dif_time);
+
+	/* Sanity check for mtx timer */
+	if (!stop_ticks || stop_ticks < start_ticks) {
+		dev_err(dev, "%s: invalid ticks (0x%x -> 0x%x)\n",
+			__func__, start_ticks, stop_ticks);
+		return -EIO;
+	}
+	tot_ticks = stop_ticks - start_ticks;
+
+	if (span_nsec) {
+		unsigned long long res = (unsigned long long)tot_ticks * 1000000UL;
+
+		do_divide(&res, span_nsec);
+		*freq_khz = (unsigned int)res;
+		if (*freq_khz < 1000)
+			*freq_khz = 1000;   /* 1MHz */
+	} else {
+		dev_err(dev, "%s: host time span is zero, cannot compute frequency!\n", __func__);
+		*freq_khz = 0;
+		return -ERANGE;
+	}
+
+	return 0;
+}
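+
+/*
+ * Worked example for the calculation above: 100000 MTX ticks measured
+ * over a 1 ms (1000000 ns) host span give 100000 * 1000000 / 1000000
+ * = 100000 kHz, i.e. a 100 MHz MTX clock.
+ */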
+
+static int pvdec_wait_dma_done(const void *dev,
+			       void __iomem *reg_base,
+			       unsigned long size,
+			       unsigned char dma_channel)
+{
+	unsigned int reg, timeout = PVDEC_TIMEOUT_COUNTER, prev_count, count = size;
+
+	do {
+		usleep_range(300, 310);
+		prev_count = count;
+		reg = VXD_RD_RPT_REG(reg_base, DMAC, DMAC_COUNT, dma_channel);
+		count = VXD_RD_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT);
+		/* Check for dma progress */
+		if (count == prev_count) {
+			/* There could be a bus lag, protect against that */
+			timeout--;
+			if (timeout == 0) {
+				dev_err(dev, "%s: FW DMA failed! (0x%x)\n", __func__, count);
+				return -EIO;
+			}
+		} else {
+			/* Reset timeout counter */
+			timeout = PVDEC_TIMEOUT_COUNTER;
+		}
+	} while (count > 0);
+
+	return 0;
+}
+
+static int pvdec_start_fw_dma(const void *dev,
+			      void __iomem *reg_base,
+			      unsigned char dma_channel,
+			      unsigned long fw_buf_size,
+			      unsigned int *freq_khz)
+{
+	unsigned int reg = 0;
+	int ret = 0;
+
+	fw_buf_size = fw_buf_size / sizeof(unsigned int);
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: dma FW upload, fw_buf_size: %lu (dwords)\n", __func__, fw_buf_size);
+#endif
+
+	pvdec_select_pipe(reg_base, 1);
+
+	reg = VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA);
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, PIXEL_DMAC_MAN_CLK_ENA, 1);
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, PIXEL_REG_MAN_CLK_ENA, 1);
+	VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, reg);
+
+	/*
+	 * Setup MTX to receive DMA
+	 * DMA transfers to/from the MTX have to be 32-bit aligned and
+	 * in multiples of 32 bits
+	 */
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_CDMAA, 0); /* MTX: 0x80900000 */
+
+	reg = 0;
+	/* Burst size in multiples of 64 bits (allowed values are 2 or 4) */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, BURSTSIZE, 0);
+	/* 0 - write to MTX memory */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, RNW, 0);
+	/* Begin transfer */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, ENABLE, 1);
+	/*
+	 * Transfer size, rounded up to a multiple of 8 dwords plus 8:
+	 * e.g. a 100-dword blob programs a length of 112.
+	 */
+	reg = VXD_WR_REG_FIELD(reg, MTX_CORE, MTX_SYSC_CDMAC, LENGTH,
+			       ((fw_buf_size + 7) & (~7)) + 8);
+	VXD_WR_REG(reg_base, MTX_CORE, MTX_SYSC_CDMAC, reg);
+
+	/* Boot MTX once transfer is done */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PROC_DMAC_CONTROL,
+			       BOOT_ON_DMA_CH0, 1);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PROC_DMAC_CONTROL, reg);
+
+	/* Toggle channel 0 usage between MTX and other PVDEC peripherals */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_PIXEL, PIXEL_CONTROL_0,
+			       DMAC_CH_SEL_FOR_MTX, 0);
+	VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_CONTROL_0, reg);
+
+	/* Reset DMA channel first */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, SRST, 1);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_EN, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, EN, 0);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, SRST, 0);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+	/*
+	 * Setup a Simple DMA for Ch0
+	 * Specify the holdover period to use for the channel
+	 */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PER_HOLD, PER_HOLD, 7);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PER_HOLD, reg, dma_channel);
+
+	/* Clear the DMAC Stats */
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_IRQ_STAT, 0, dma_channel);
+
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH_ADDR, ADDR,
+			       MTX_CORE_MTX_SYSC_CDMAT_OFFSET);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PERIPH_ADDR, reg, dma_channel);
+
+	/* Clear peripheral register address */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, ACC_DEL, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, INCR, DMAC_INCR_OFF);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, BURST, DMAC_BURST_1);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, EXT_BURST, DMAC_EXT_BURST_0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_PERIPH, EXT_SA, 0);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_PERIPH, reg, dma_channel);
+
+	/*
+	 * Now start the transfer by setting the list enable bit in
+	 * the count register
+	 */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, TRANSFER_IEN, 1);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, PW, DMAC_PWIDTH_32_BIT);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, DIR, DMAC_MEM_TO_VXD);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, PI, DMAC_INCR_ON);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_FIN_CTL, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, LIST_EN, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, ENABLE_2D_MODE, 0);
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, CNT, fw_buf_size);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+	reg = VXD_WR_REG_FIELD(reg, DMAC, DMAC_COUNT, EN, 1);
+	VXD_WR_RPT_REG(reg_base, DMAC, DMAC_COUNT, reg, dma_channel);
+
+	/* NOTE: The MTX timer starts once DMA boot is triggered */
+	{
+		struct timespec64 host_time;
+		unsigned int mtx_time;
+
+		pvdec_clock_measure(reg_base, &host_time, &mtx_time);
+
+		ret = pvdec_wait_dma_done(dev, reg_base, fw_buf_size, dma_channel);
+		if (!ret) {
+			if (pvdec_clock_calculate(dev, reg_base, &host_time, mtx_time,
+						  freq_khz) < 0)
+				dev_dbg(dev, "%s: measure info not available!\n", __func__);
+		}
+	}
+
+	return ret;
+}
+
+static int pvdec_set_clocks(void __iomem *reg_base, unsigned int req_clocks)
+{
+	unsigned int clocks = 0, reg;
+	unsigned int pvdec_timeout;
+
+	/* Turn on core clocks only */
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  PVDEC_REG_MAN_CLK_ENA, 1);
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA, CORE_MAN_CLK_ENA, 1);
+
+	/* Wait until core clocks set */
+	pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+	do {
+		VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA, clocks);
+		udelay(vxd_plat_poll_udelay);
+		reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA);
+		pvdec_timeout--;
+	} while (reg != clocks && pvdec_timeout != 0);
+
+	if (pvdec_timeout == 0)
+		return -EIO;
+
+	/* Write requested clocks */
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_MAN_CLK_ENA, req_clocks);
+
+	return 0;
+}
+
+static int pvdec_enable_clocks(void __iomem *reg_base)
+{
+	unsigned int clocks = 0;
+
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  PVDEC_REG_MAN_CLK_ENA, 1);
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  CORE_MAN_CLK_ENA, 1);
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  MEM_MAN_CLK_ENA, 1);
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  PROC_MAN_CLK_ENA, 1);
+	clocks = VXD_WR_REG_FIELD(clocks, PVDEC_CORE, PVDEC_MAN_CLK_ENA,
+				  PIXEL_PROC_MAN_CLK_ENA, 1);
+
+	return pvdec_set_clocks(reg_base, clocks);
+}
+
+static int pvdec_disable_clocks(void __iomem *reg_base)
+{
+	return pvdec_set_clocks(reg_base, 0);
+}
+
+static void pvdec_ena_mtx_int(void __iomem *reg_base)
+{
+	unsigned int reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA);
+
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_STAT, HOST_PROC_IRQ, 1);
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_STAT, HOST_MMU_FAULT_IRQ, 1);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, reg);
+}
+
+static void pvdec_check_mmu_requests(void __iomem *reg_base,
+				     unsigned int mmu_checks,
+				     unsigned int max_attempts)
+{
+	unsigned int reg, i, checks = 0;
+
+	for (i = 0; i < max_attempts; i++) {
+		reg = VXD_RD_REG(reg_base,
+				 IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ);
+		reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_MEM_REQ, TAG_OUTSTANDING);
+		if (reg) {
+			udelay(vxd_plat_poll_udelay);
+			continue;
+		}
+
+		/* Read READ_WORDS_OUTSTANDING */
+		reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_MEM_EXT_OUTSTANDING);
+		reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_MEM_EXT_OUTSTANDING,
+				       READ_WORDS);
+		if (!reg) {
+			checks++;
+			if (checks == mmu_checks)
+				break;
+		} else { /* Reset the counter and continue */
+			checks = 0;
+		}
+	}
+
+	if (checks != mmu_checks)
+		pr_warn("Checking for MMU outstanding requests failed!\n");
+}
+
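+/*
+ * Full reset sequence: drain outstanding MMU requests, pause the MMU,
+ * soft-reset all non-MMU blocks, reset the MMU itself and finally the
+ * whole PVDEC core, polling each stage with a bounded timeout.
+ */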
+static int pvdec_reset(void __iomem *reg_base, unsigned char skip_pipe_clocks)
+{
+	unsigned int reg = 0;
+	unsigned char pipe, num_ent_pipes, num_pix_pipes;
+	unsigned int core_id, pvdec_timeout;
+
+	core_id = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_ID);
+
+	num_ent_pipes = VXD_RD_REG_FIELD(core_id, PVDEC_CORE, PVDEC_CORE_ID, ENT_PIPES);
+	num_pix_pipes = VXD_RD_REG_FIELD(core_id, PVDEC_CORE, PVDEC_CORE_ID, PIX_PIPES);
+
+	if (num_pix_pipes == 0 || num_pix_pipes > VXD_MAX_PIPES)
+		return -EINVAL;
+
+	/* Clear interrupt enabled flag */
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, 0);
+
+	/* Clear any pending interrupt flags */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_CLEAR, IRQ_CLEAR, 0xFFFF);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, reg);
+
+	/* Turn all clocks on - don't touch reserved bits! */
+	pvdec_set_clocks(reg_base, 0xFFFF0113);
+
+	if (!skip_pipe_clocks) {
+		for (pipe = 1; pipe <= num_pix_pipes; pipe++) {
+			pvdec_select_pipe(reg_base, pipe);
+			/* Turn all available clocks on - skip reserved bits! */
+			VXD_WR_REG(reg_base, PVDEC_PIXEL, PIXEL_MAN_CLK_ENA, 0xFFBF0FFF);
+		}
+
+		for (pipe = 1; pipe <= num_ent_pipes; pipe++) {
+			pvdec_select_pipe(reg_base, pipe);
+			/* Turn all available clocks on - skip reserved bits! */
+			VXD_WR_REG(reg_base, PVDEC_ENTROPY, ENTROPY_MAN_CLK_ENA, 0x5);
+		}
+	}
+
+	/* 1st MMU outstanding requests check */
+	pvdec_check_mmu_requests(reg_base, 1000, 2000);
+
+	/* Make sure MMU is not under reset: wait for MMU_SOFT_RESET -> 0 */
+	pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+	do {
+		reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+		reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET);
+		udelay(vxd_plat_poll_udelay);
+		pvdec_timeout--;
+	} while (reg != 0 && pvdec_timeout != 0);
+
+	if (pvdec_timeout == 0) {
+		pr_err("Waiting for MMU soft reset(1) timed out!\n");
+		pvdec_mtx_status_dump(reg_base, NULL);
+	}
+
+	/* Write 1 to MMU_PAUSE_SET */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_SET, 1);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+	/* 2nd MMU outstanding requests check */
+	pvdec_check_mmu_requests(reg_base, 100, 1000);
+
+	/* Issue software reset for all but MMU/core */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_PIXEL_PROC_SOFT_RST, 0xFF);
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_ENTROPY_SOFT_RST, 0xFF);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, reg);
+
+	VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, 0);
+
+	/* Write 1 to MMU_PAUSE_CLEAR in MMU_CONTROL1 reg */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_CLEAR, 1);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+	/* Confirm MMU_PAUSE_SET is cleared */
+	pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+	do {
+		reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+		reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_PAUSE_SET);
+		udelay(vxd_plat_poll_udelay);
+		pvdec_timeout--;
+	} while (reg != 0 && pvdec_timeout != 0);
+
+	if (pvdec_timeout == 0) {
+		pr_err("Waiting for MMU pause clear timed out!\n");
+		pvdec_mtx_status_dump(reg_base, NULL);
+		return -EIO;
+	}
+
+	/* Issue software reset for MMU */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET, 1);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+	/* Wait until MMU_SOFT_RESET -> 0 */
+	pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+	do {
+		reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+		reg = VXD_RD_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_SOFT_RESET);
+		udelay(vxd_plat_poll_udelay);
+		pvdec_timeout--;
+	} while (reg != 0 && pvdec_timeout != 0);
+
+	if (pvdec_timeout == 0) {
+		pr_err("Waiting for MMU soft reset(2) timed out!\n");
+		pvdec_mtx_status_dump(reg_base, NULL);
+	}
+
+	/* Issue software reset for entire PVDEC */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_SOFT_RST, 0x1);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST, reg);
+
+	/* Waiting for reset bit to be cleared */
+	pvdec_timeout = PVDEC_TIMEOUT_COUNTER;
+	do {
+		reg = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_SOFT_RST);
+		reg = VXD_RD_REG_FIELD(reg, PVDEC_CORE, PVDEC_SOFT_RST, PVDEC_SOFT_RST);
+		udelay(vxd_plat_poll_udelay);
+		pvdec_timeout--;
+	} while (reg != 0 && pvdec_timeout != 0);
+
+	if (pvdec_timeout == 0) {
+		pr_err("Waiting for PVDEC soft reset timed out!\n");
+		pvdec_mtx_status_dump(reg_base, NULL);
+		return -EIO;
+	}
+
+	/* Clear interrupt enabled flag */
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, 0);
+
+	/* Clear any pending interrupt flags */
+	reg = 0;
+	reg = VXD_WR_REG_FIELD(reg, PVDEC_CORE, PVDEC_INT_CLEAR, IRQ_CLEAR, 0xFFFF);
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, reg);
+	return 0;
+}
+
+static int pvdec_get_properties(void __iomem *reg_base,
+				struct vxd_core_props *props)
+{
+	unsigned int major, minor, maint, group_id, core_id;
+	unsigned char num_pix_pipes, pipe;
+
+	if (!props)
+		return -EINVAL;
+
+	/* PVDEC Core Revision Information */
+	props->core_rev = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_REV);
+	major = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MAJOR_REV);
+	minor = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MINOR_REV);
+	maint = VXD_RD_REG_FIELD(props->core_rev, PVDEC_CORE, PVDEC_CORE_REV, PVDEC_MAINT_REV);
+
+	/* Core ID */
+	props->pvdec_core_id = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_CORE_ID);
+	group_id = VXD_RD_REG_FIELD(props->pvdec_core_id, PVDEC_CORE, PVDEC_CORE_ID, GROUP_ID);
+	core_id = VXD_RD_REG_FIELD(props->pvdec_core_id, PVDEC_CORE, PVDEC_CORE_ID, CORE_ID);
+
+	/* Ensure that the core is IMG Video Decoder (PVDEC). */
+	if (group_id != 3 || core_id != 3) {
+		pr_err("Wrong core revision %d.%d.%d !!!\n", major, minor, maint);
+		return -EIO;
+	}
+
+	props->mmu_config0 = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONFIG0);
+	props->mmu_config1 = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONFIG1);
+
+	num_pix_pipes = VXD_NUM_PIX_PIPES(*props);
+
+	if (unlikely(num_pix_pipes > VXD_MAX_PIPES)) {
+		pr_warn("Too many pipes detected!\n");
+		num_pix_pipes = VXD_MAX_PIPES;
+	}
+
+	for (pipe = 1; pipe <= num_pix_pipes; ++pipe) {
+		pvdec_select_pipe(reg_base, pipe);
+		if (pipe <= VXD_MAX_PIPES) {
+			props->pixel_pipe_cfg[pipe - 1] =
+				VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_PIPE_CONFIG);
+			props->pixel_misc_cfg[pipe - 1] =
+				VXD_RD_REG(reg_base, PVDEC_PIXEL, PIXEL_MISC_CONFIG);
+			/*
+			 * Detect pipe access problems.
+			 * Pipe config shall always indicate
+			 * a non-zero value (at least one standard supported)!
+			 */
+			if (!props->pixel_pipe_cfg[pipe - 1])
+				pr_warn("Pipe config info is wrong!\n");
+		}
+	}
+
+	pvdec_select_pipe(reg_base, 1);
+	props->pixel_max_frame_cfg = VXD_RD_REG(reg_base, PVDEC_PIXEL, MAX_FRAME_CONFIG);
+
+	{
+		unsigned int fifo_ctrl = VXD_RD_REG(reg_base, PVDEC_CORE, PROC_DBG_FIFO_CTRL0);
+
+		props->dbg_fifo_size = VXD_RD_REG_FIELD(fifo_ctrl,
+							PVDEC_CORE,
+							PROC_DBG_FIFO_CTRL0,
+							PROC_DBG_FIFO_SIZE);
+	}
+
+	return 0;
+}
+
+int vxd_pvdec_init(const void *dev, void __iomem *reg_base)
+{
+	int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: trying to reset VXD, reg base: %p\n", __func__, reg_base);
+#endif
+
+	ret = pvdec_enable_clocks(reg_base);
+	if (ret) {
+		dev_err(dev, "%s: failed to enable clocks!\n", __func__);
+		return ret;
+	}
+
+	ret = pvdec_reset(reg_base, FALSE);
+	if (ret) {
+		dev_err(dev, "%s: VXD reset failed!\n", __func__);
+		return ret;
+	}
+
+	pvdec_ena_mtx_int(reg_base);
+
+	return 0;
+}
+
+/* Send a message <msg_size> dwords long */
+int vxd_pvdec_send_msg(const void *dev,
+		       void __iomem *reg_base,
+		       unsigned int *msg,
+		       unsigned long msg_size,
+		       unsigned short msg_id,
+		       struct vxd_dev *ctx)
+{
+	int ret, to_mtx_off; /* offset in dwords */
+	unsigned int wr_idx, rd_idx; /* indices in dwords */
+	unsigned long to_mtx_size; /* size in dwords */
+	unsigned int msg_wrd;
+	struct timespec64 time;
+	static int cnt;
+
+	ktime_get_real_ts64(&time);
+
+	ctx->time_fw[cnt].start_time = timespec64_to_ns((const struct timespec64 *)&time);
+	ctx->time_fw[cnt].id = msg_id;
+	cnt++;
+
+	if (cnt >= ARRAY_SIZE(ctx->time_fw))
+		cnt = 0;
+
+	ret = pvdec_get_to_mtx_cfg(reg_base, &to_mtx_size, &to_mtx_off, &wr_idx, &rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to obtain mtx ring buffer config!\n", __func__);
+		return ret;
+	}
+
+	/* populate the size and id fields in the message header */
+	msg_wrd = VXD_RD_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG);
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_SIZE, msg_size);
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_ID, msg_id);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG, msg_wrd);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: [msg out] size: %lu, id: 0x%x, type: 0x%x\n", __func__, msg_size, msg_id,
+		VXD_RD_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_TYPE));
+	dev_dbg(dev, "%s: to_mtx: (%lu @ %d), wr_idx: %u, rd_idx: %u\n",
+		__func__, to_mtx_size, to_mtx_off, wr_idx, rd_idx);
+#endif
+
+	ret = pvdec_check_comms_space(reg_base, msg_size, FALSE);
+	if (ret) {
+		dev_err(dev, "%s: invalid message or not enough space (%d)!\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pvdec_write_vlr(reg_base, msg, msg_size, to_mtx_off + wr_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to write msg to vlr!\n", __func__);
+		return ret;
+	}
+
+	wr_idx += msg_size;
+	if (wr_idx == to_mtx_size)
+		wr_idx = 0;
+	VXD_WR_REG_ABS(reg_base, VLR_OFFSET +
+		PVDEC_FW_TO_MTX_WR_IDX_OFFSET, wr_idx);
+
+	pvdec_kick_mtx(reg_base);
+
+	return 0;
+}
+
+/* Fetch size (in dwords) of message pending from MTX */
+int vxd_pvdec_pend_msg_info(const void *dev, void __iomem *reg_base,
+			    unsigned long *size,
+			    unsigned short *msg_id,
+			    unsigned char *not_last_msg)
+{
+	int ret, to_host_off; /* offset in dwords */
+	unsigned int wr_idx, rd_idx; /* indices in dwords */
+	unsigned long to_host_size; /* size in dwords */
+	unsigned int val = 0;
+
+	ret = pvdec_get_to_host_cfg(reg_base, &to_host_size, &to_host_off, &wr_idx, &rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to obtain host ring buffer config!\n", __func__);
+		return ret;
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: to host: (%lu @ %d), wr: %u, rd: %u\n", __func__,
+		to_host_size, to_host_off, wr_idx, rd_idx);
+#endif
+
+	if (wr_idx == rd_idx) {
+		*size = 0;
+		*msg_id = 0;
+		return 0;
+	}
+
+	ret = pvdec_read_vlr(reg_base, &val, 1, to_host_off + rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to read first word!\n", __func__);
+		return ret;
+	}
+
+	*size = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_SIZE);
+	*msg_id = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_ID);
+	*not_last_msg = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, NOT_LAST_MSG);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: [msg in] rd_idx: %u, size: %lu, id: 0x%04x, type: 0x%x\n",
+		__func__, rd_idx, *size, *msg_id,
+		VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_TYPE));
+#endif
+
+	return 0;
+}
+
+/*
+ * Receive message from the MTX and place it in a <buf_size> dwords long
+ * buffer. If the provided buffer is too small to hold the message, only part
+ * of it will be placed in the buffer, but the ring buffer read index will be
+ * moved so that the message is no longer available.
+ */
+int vxd_pvdec_recv_msg(const void *dev, void __iomem *reg_base,
+		       unsigned int *buf,
+		       unsigned long buf_size,
+		       struct vxd_dev *vxd)
+{
+	int ret, to_host_off; /* offset in dwords */
+	unsigned int wr_idx, rd_idx; /* indices in dwords */
+	unsigned long to_host_size, msg_size, to_read; /* sizes in dwords */
+	unsigned int val = 0;
+	struct timespec64 time;
+	unsigned short msg_id;
+	int loop;
+
+	ret = pvdec_get_to_host_cfg(reg_base, &to_host_size,
+				    &to_host_off, &wr_idx, &rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to obtain host ring buffer config!\n", __func__);
+		return ret;
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: to host: (%lu @ %d), wr: %u, rd: %u\n", __func__,
+		to_host_size, to_host_off, wr_idx, rd_idx);
+#endif
+
+	/* Obtain the message size */
+	ret = pvdec_read_vlr(reg_base, &val, 1, to_host_off + rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to read first word!\n", __func__);
+		return ret;
+	}
+	msg_size = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_SIZE);
+
+	to_read = (msg_size > buf_size) ? buf_size : msg_size;
+
+	/* Does the message wrap? */
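+	/*
+	 * e.g. (hypothetical sizes) with to_host_size == 64, rd_idx == 60
+	 * and a 10-dword message: 4 dwords are read before the wrap and
+	 * the remaining 6 from the start of the buffer.
+	 */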
+	if (to_read + rd_idx > to_host_size) {
+		unsigned long chunk_size = to_host_size - rd_idx;
+
+		ret = pvdec_read_vlr(reg_base, buf, chunk_size, to_host_off + rd_idx);
+		if (ret) {
+			dev_err(dev, "%s: failed to read chunk before wrap!\n", __func__);
+			return ret;
+		}
+		to_read -= chunk_size;
+		buf += chunk_size;
+		rd_idx = 0;
+		msg_size -= chunk_size;
+	}
+
+	/*
+	 * If the message wrapped, read the second chunk.
+	 * If it didn't, read first and only chunk
+	 */
+	ret = pvdec_read_vlr(reg_base, buf, to_read, to_host_off + rd_idx);
+	if (ret) {
+		dev_err(dev, "%s: failed to read message from vlr!\n", __func__);
+		return ret;
+	}
+
+	/* Update read index in the ring buffer */
+	rd_idx = (rd_idx + msg_size) % to_host_size;
+	VXD_WR_REG_ABS(reg_base, VLR_OFFSET +
+		PVDEC_FW_TO_HOST_RD_IDX_OFFSET, rd_idx);
+
+	msg_id = VXD_RD_REG_FIELD(val, PVDEC_FW, DEVA_GENMSG, MSG_ID);
+
+	ktime_get_real_ts64(&time);
+	for (loop = 0; loop < ARRAY_SIZE(vxd->time_fw); loop++) {
+		if (vxd->time_fw[loop].id == msg_id) {
+			vxd->time_fw[loop].end_time =
+				timespec64_to_ns((const struct timespec64 *)&time);
+#ifdef DEBUG_DECODER_DRIVER
+			dev_info(dev, "fw decode time is %llu us for msg_id 0x%x\n",
+				 div_s64(vxd->time_fw[loop].end_time -
+				 vxd->time_fw[loop].start_time, 1000), msg_id);
+#endif
+			break;
+		}
+	}
+
+	if (loop == ARRAY_SIZE(vxd->time_fw))
+		dev_err(dev, "fw decode time for msg_id 0x%x was not measured\n", msg_id);
+
+	return 0;
+}
+
+int vxd_pvdec_check_fw_status(const void *dev, void __iomem *reg_base)
+{
+	int ret;
+	unsigned int val = 0;
+
+	/* Obtain current fw status */
+	ret = pvdec_read_vlr(reg_base, &val, 1, PVDEC_FW_STATUS_OFFSET);
+	if (ret) {
+		dev_err(dev, "%s: failed to read fw status!\n", __func__);
+		return ret;
+	}
+
+	/* Check for fatal condition */
+	if (val == PVDEC_FW_STATUS_PANIC || val == PVDEC_FW_STATUS_ASSERT ||
+	    val == PVDEC_FW_STATUS_SO)
+		return -EIO;
+
+	return 0;
+}
+
+static int pvdec_send_init_msg(const void *dev,
+			       void __iomem *reg_base,
+			       struct vxd_ena_params *ena_params)
+{
+	unsigned short msg_id = 0;
+	unsigned int msg[PVDEC_FW_DEVA_INIT_MSG_WRDS] = { 0 }, msg_wrd = 0;
+	struct vxd_dev *vxd;
+	int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: rendec: %d@0x%x, crc: 0x%x\n", __func__,
+		ena_params->rendec_size, ena_params->rendec_addr, ena_params->crc);
+#endif
+
+	vxd = kzalloc(sizeof(*vxd), GFP_KERNEL);
+	if (!vxd)
+		return -ENOMEM;
+
+	/* message type */
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_GENMSG, MSG_TYPE,
+				   PVDEC_FW_MSG_TYPE_INIT);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW, DEVA_GENMSG, msg_wrd);
+
+	/* rendec address */
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, RENDEC_ADDR0, ena_params->rendec_addr);
+
+	/* rendec size */
+	msg_wrd = 0;
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, RENDEC_SIZE0,
+				   ena_params->rendec_size);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, RENDEC_SIZE0, msg_wrd);
+
+	/* HEVC configuration */
+	msg_wrd = 0;
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT,
+				   HEVC_CFG_MAX_H_FOR_PIPE_WAIT, 0xFFFF);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, HEVC_CFG, msg_wrd);
+
+	/* signature select */
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, SIG_SELECT, ena_params->crc);
+
+	/* partial frame notification timer divider */
+	msg_wrd = 0;
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, PFNT_DIV, PVDEC_PFNT_DIV);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, PFNT_DIV, msg_wrd);
+
+	/* firmware watchdog timeout value */
+	msg_wrd = VXD_WR_REG_FIELD(msg_wrd, PVDEC_FW, DEVA_INIT, FWWDT_MS, ena_params->fwwdt_ms);
+	VXD_WR_MSG_WRD(msg, PVDEC_FW_DEVA_INIT, FWWDT_MS, msg_wrd);
+
+	ret = vxd_pvdec_send_msg(dev, reg_base, msg, ARRAY_SIZE(msg), msg_id, vxd);
+	kfree(vxd);
+
+	return ret;
+}
+
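+/*
+ * Boot sequence: reset the core, verify the firmware fits in MTX RAM,
+ * apply pre-boot settings, upload the firmware over DMA (measuring the
+ * core clock on the way), apply post-boot settings, wait for the boot
+ * signature and finally send the init message.
+ */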
+int vxd_pvdec_ena(const void *dev, void __iomem *reg_base,
+		  struct vxd_ena_params *ena_params,
+		  struct vxd_fw_hdr *fw_hdr,
+		  unsigned int *freq_khz)
+{
+	int ret;
+	unsigned int mtx_ram_size = 0;
+	unsigned char dma_channel = 0;
+
+	ret = vxd_pvdec_init(dev, reg_base);
+	if (ret) {
+		dev_err(dev, "%s: PVDEC init failed!\n", __func__);
+		return ret;
+	}
+
+	ret = pvdec_get_mtx_ram_size(reg_base, &mtx_ram_size);
+	if (ret) {
+		dev_err(dev, "%s: failed to get MTX RAM size!\n", __func__);
+		return ret;
+	}
+
+	if (mtx_ram_size < fw_hdr->core_size) {
+		dev_err(dev, "%s: FW larger than MTX RAM size (%u < %u)!\n",
+			__func__, mtx_ram_size, fw_hdr->core_size);
+		return -EINVAL;
+	}
+
+	/* Apply pre boot settings - if any */
+	pvdec_pre_boot_setup(dev, reg_base, ena_params);
+
+	pvdec_prep_fw_upload(dev, reg_base, ena_params, dma_channel);
+
+	ret = pvdec_start_fw_dma(dev, reg_base, dma_channel, fw_hdr->core_size, freq_khz);
+	if (ret) {
+		dev_err(dev, "%s: failed to load FW! (%d)\n", __func__, ret);
+		pvdec_mtx_status_dump(reg_base, NULL);
+		return ret;
+	}
+
+	/* Apply final settings - if any */
+	pvdec_post_boot_setup(dev, reg_base, *freq_khz);
+
+	ret = pvdec_poll_fw_boot(reg_base, &ena_params->boot_poll);
+	if (ret) {
+		dev_err(dev, "%s: FW failed to boot! (%d)!\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pvdec_send_init_msg(dev, reg_base, ena_params);
+	if (ret) {
+		dev_err(dev, "%s: failed to send init message! (%d)!\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int vxd_pvdec_dis(const void *dev, void __iomem *reg_base)
+{
+	int ret = pvdec_enable_clocks(reg_base);
+
+	if (ret) {
+		dev_err(dev, "%s: failed to enable clocks! (%d)\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pvdec_reset(reg_base, TRUE);
+	if (ret) {
+		dev_err(dev, "%s: VXD reset failed! (%d)\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pvdec_disable_clocks(reg_base);
+	if (ret) {
+		dev_err(dev, "%s: VXD disable clocks failed! (%d)\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Invalidate VXD's MMU cache.
+ */
+int vxd_pvdec_mmu_flush(const void *dev, void __iomem *reg_base)
+{
+	unsigned int reg = VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1);
+
+	if (reg == PVDEC_INVALID_HW_STATE) {
+		dev_err(dev, "%s: invalid HW state!\n", __func__);
+		return -EIO;
+	}
+
+	reg = VXD_WR_REG_FIELD(reg, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, MMU_INVALDC, 0xF);
+	VXD_WR_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_CONTROL1, reg);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: device MMU cache invalidated!\n", __func__);
+#endif
+
+	return 0;
+}
+
+irqreturn_t vxd_pvdec_clear_int(void __iomem *reg_base, unsigned int *irq_status)
+{
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int enabled;
+	unsigned int status = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_INT_STAT);
+
+	enabled = VXD_RD_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA);
+
+	status &= enabled;
+	/* Store the last irq status */
+	*irq_status |= status;
+
+	if (status & (PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK |
+		PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_MASK))
+		ret = IRQ_WAKE_THREAD;
+
+	/* Disable MMU interrupts - clearing is not enough */
+	if (status & PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK) {
+		enabled &= ~PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK;
+		VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_HOST_INT_ENA, enabled);
+	}
+
+	VXD_WR_REG(reg_base, PVDEC_CORE, PVDEC_INT_CLEAR, status);
+
+	return ret;
+}
+
+/*
+ * Check if there's enough space in comms RAM to submit a message
+ * <msg_size> dwords long. This function also submits a padding message
+ * if necessary for this particular message.
+ *
+ * return 0 if there is enough space,
+ * return -EBUSY if there is not enough space,
+ * return another fault code in case of an error.
+ */
+int vxd_pvdec_msg_fit(const void *dev, void __iomem *reg_base, unsigned long msg_size)
+{
+	int ret = pvdec_check_comms_space(reg_base, msg_size, TRUE);
+
+	/*
+	 * In some environments, when the to_mtx buffer is small and the
+	 * messages userspace submits are large (e.g. the FWBSP flow), it is
+	 * possible that firmware will consume the padding message sent by
+	 * vxd_pvdec_msg_fit() immediately. Retry the check.
+	 */
+	if (ret == -EBUSY) {
+		unsigned int flags = VXD_RD_REG_ABS(reg_base,
+				VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET) |
+			PVDEC_FWFLAG_FAKE_COMPLETION;
+
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(dev, "comms space full, asking fw to send empty msg when space is available\n");
+#endif
+
+		VXD_WR_REG_ABS(reg_base, VLR_OFFSET + PVDEC_FW_FLAGS_OFFSET, flags);
+		ret = pvdec_check_comms_space(reg_base, msg_size, FALSE);
+	}
+
+	return ret;
+}
+
+void vxd_pvdec_get_state(const void *dev, void __iomem *reg_base,
+			 unsigned int num_pipes,
+			 struct vxd_hw_state *state)
+{
+	unsigned char pipe;
+#ifdef DEBUG_DECODER_DRIVER
+	unsigned int state_cfg = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET +
+			PVDEC_FW_STATE_BUF_CFG_OFFSET));
+
+	unsigned short state_size = PVDEC_FW_COM_BUF_SIZE(state_cfg);
+	unsigned short state_off = PVDEC_FW_COM_BUF_OFF(state_cfg);
+
+	/*
+	 * The generic fw progress counter
+	 * is the first element in the fw state
+	 */
+	dev_dbg(dev, "%s: state off: 0x%x, size: 0x%x\n", __func__, state_off, state_size);
+	state->fw_counter = VXD_RD_REG_ABS(reg_base, (VLR_OFFSET + state_off));
+	dev_dbg(dev, "%s: fw_counter: 0x%x\n", __func__, state->fw_counter);
+#endif
+
+	/* We just combine the macroblocks being processed by the HW */
+	for (pipe = 0; pipe < num_pipes; pipe++) {
+		unsigned int p_off = VXD_GET_PIPE_OFF(num_pipes, pipe + 1);
+		unsigned int reg_val;
+
+		/* Front-end */
+		unsigned int reg_off = VXD_GET_REG_OFF(PVDEC_ENTROPY, ENTROPY_LAST_MB);
+
+		state->fe_status[pipe] = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+		reg_off = VXD_GET_REG_OFF(MSVDX_VEC, VEC_ENTDEC_INFORMATION);
+		state->fe_status[pipe] |= VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+		/* Back-end */
+		reg_off = VXD_GET_REG_OFF(PVDEC_VEC_BE, VEC_BE_STATUS);
+		state->be_status[pipe] = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+		reg_off = VXD_GET_REG_OFF(MSVDX_VDMC, VDMC_MACROBLOCK_NUMBER);
+		state->be_status[pipe] |= VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+
+		/*
+		 * Take DMAC channels 2/3 into consideration to cover
+		 * parser progress on SR1/2
+		 */
+		reg_off = VXD_GET_RPT_REG_OFF(DMAC, DMAC_COUNT, 2);
+		reg_val = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+		state->dmac_status[pipe][0] = VXD_RD_REG_FIELD(reg_val, DMAC, DMAC_COUNT, CNT);
+		reg_off = VXD_GET_RPT_REG_OFF(DMAC, DMAC_COUNT, 3);
+		reg_val = VXD_RD_REG_ABS(reg_base, reg_off + p_off);
+		state->dmac_status[pipe][1] = VXD_RD_REG_FIELD(reg_val, DMAC, DMAC_COUNT, CNT);
+	}
+}
+
+/*
+ * Check for the source of the last interrupt.
+ *
+ * return 0 if nothing serious happened,
+ * return -EFAULT if there was a critical interrupt detected.
+ */
+int vxd_pvdec_check_irq(const void *dev, void __iomem *reg_base, unsigned int irq_status)
+{
+	if (irq_status & PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK) {
+		unsigned int status0 =
+			VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_STATUS0);
+		unsigned int status1 =
+			VXD_RD_REG(reg_base, IMG_VIDEO_BUS4_MMU, MMU_STATUS1);
+
+		unsigned int addr = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+				MMU_STATUS0, MMU_FAULT_ADDR) << 12;
+		unsigned char reason = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+				MMU_STATUS0, MMU_PF_N_RW);
+		unsigned char requestor = VXD_RD_REG_FIELD(status1, IMG_VIDEO_BUS4_MMU,
+				MMU_STATUS1, MMU_FAULT_REQ_ID);
+		unsigned char type = VXD_RD_REG_FIELD(status1, IMG_VIDEO_BUS4_MMU,
+				MMU_STATUS1, MMU_FAULT_RNW);
+		unsigned char secure = VXD_RD_REG_FIELD(status0, IMG_VIDEO_BUS4_MMU,
+				MMU_STATUS0, MMU_SECURE_FAULT);
+
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(dev, "%s: MMU Page Fault s0:%08x s1:%08x\n", __func__, status0, status1);
+#endif
+
+		dev_err(dev, "%s: MMU %s fault from %s while %s @ 0x%08X\n", __func__,
+			(reason) ? "Page" : "Protection",
+			(requestor & (0x1)) ? "dmac" :
+			(requestor & (0x2)) ? "vec"  :
+			(requestor & (0x4)) ? "vdmc" :
+			(requestor & (0x8)) ? "vdeb" : "unknown source",
+			(type) ? "reading" : "writing", addr);
+
+		if (secure)
+			dev_err(dev, "%s: MMU security policy violation detected!\n", __func__);
+
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * This function enables the clocks, fetches the core properties, stores them
+ * in the <props> structure and DISABLES the clocks. Do not call when hardware
+ * is busy!
+ */
+int vxd_pvdec_get_props(const void *dev, void __iomem *reg_base, struct vxd_core_props *props)
+{
+#ifdef DEBUG_DECODER_DRIVER
+	unsigned char num_pix_pipes, pipe;
+#endif
+	int ret = pvdec_enable_clocks(reg_base);
+
+	if (ret) {
+		dev_err(dev, "%s: failed to enable clocks!\n", __func__);
+		return ret;
+	}
+
+	ret = pvdec_get_mtx_ram_size(reg_base, &props->mtx_ram_size);
+	if (ret) {
+		dev_err(dev, "%s: failed to get MTX ram size!\n", __func__);
+		return ret;
+	}
+
+	ret = pvdec_get_properties(reg_base, props);
+	if (ret) {
+		dev_err(dev, "%s: failed to get VXD props!\n", __func__);
+		return ret;
+	}
+
+	if (pvdec_disable_clocks(reg_base))
+		dev_err(dev, "%s: failed to disable clocks!\n", __func__);
+
+#ifdef DEBUG_DECODER_DRIVER
+	num_pix_pipes = VXD_NUM_PIX_PIPES(*props);
+
+	/* Warning already raised in pvdec_get_properties() */
+	if (unlikely(num_pix_pipes > VXD_MAX_PIPES))
+		num_pix_pipes = VXD_MAX_PIPES;
+	dev_dbg(dev, "%s: core_rev: 0x%08x\n", __func__, props->core_rev);
+	dev_dbg(dev, "%s: pvdec_core_id: 0x%08x\n", __func__, props->pvdec_core_id);
+	dev_dbg(dev, "%s: mmu_config0: 0x%08x\n", __func__, props->mmu_config0);
+	dev_dbg(dev, "%s: mmu_config1: 0x%08x\n", __func__, props->mmu_config1);
+	dev_dbg(dev, "%s: mtx_ram_size: %u\n", __func__, props->mtx_ram_size);
+	dev_dbg(dev, "%s: pix max frame: 0x%08x\n", __func__, props->pixel_max_frame_cfg);
+
+	for (pipe = 1; pipe <= num_pix_pipes; ++pipe)
+		dev_dbg(dev, "%s:  pipe %u, 0x%08x, misc 0x%08x\n",
+			__func__, pipe, props->pixel_pipe_cfg[pipe - 1],
+			props->pixel_misc_cfg[pipe - 1]);
+	dev_dbg(dev, "%s: dbg fifo size: %u\n", __func__, props->dbg_fifo_size);
+#endif
+	return 0;
+}
diff --git a/drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h b/drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
new file mode 100644
index 000000000000..6cc9aef45904
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD PVDEC Private header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _VXD_PVDEC_PRIV_H
+#define _VXD_PVDEC_PRIV_H
+#include <linux/interrupt.h>
+
+#include "img_dec_common.h"
+#include "vxd_pvdec_regs.h"
+#include "vxd_dec.h"
+
+#ifdef ERROR_RECOVERY_SIMULATION
+/* Kernel object used for debugging. Declared in v4l2_int.c. */
+extern struct kobject *vxd_dec_kobject;
+extern int disable_fw_irq_value;
+extern int g_module_irq;
+#endif
+
+struct vxd_boot_poll_params {
+	unsigned int msleep_cycles;
+};
+
+struct vxd_ena_params {
+	struct vxd_boot_poll_params boot_poll;
+
+	unsigned long fw_buf_size;
+	/* VXD's MMU virtual address of the firmware buffer */
+	unsigned int fw_buf_virt_addr;
+	unsigned int ptd; /* Shifted physical address of PTD */
+
+	/* Required for firmware upload via registers. */
+	struct {
+		const unsigned char *buf; /* Firmware blob buffer */
+
+	} regs_data;
+
+	struct {
+		unsigned secure : 1;        /* Secure flow indicator. */
+		unsigned wait_dbg_fifo : 1; /*
+					     * Indicates that fw shall use
+					     * blocking mode when putting logs
+					     * into debug fifo
+					     */
+	};
+
+	/* Structure containing memory staller configuration */
+	struct {
+		unsigned int *data;          /* Configuration data array */
+		unsigned char size;            /* Configuration size in dwords */
+
+	} mem_staller;
+
+	unsigned int fwwdt_ms;    /* Firmware software watchdog timeout value */
+
+	unsigned int crc;         /* HW signatures to be enabled by firmware */
+	unsigned int rendec_addr; /* VXD's virtual address of a rendec buffer */
+	unsigned short rendec_size; /* Size of a rendec buffer in 4K pages */
+};
+
+int vxd_pvdec_init(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_ena(const void *dev, void __iomem *reg_base,
+		  struct vxd_ena_params *ena_params, struct vxd_fw_hdr *hdr,
+		  unsigned int *freq_khz);
+
+int vxd_pvdec_dis(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_mmu_flush(const void *dev, void __iomem *reg_base);
+
+int vxd_pvdec_send_msg(const void *dev, void __iomem *reg_base,
+		       unsigned int *msg, unsigned long msg_size, unsigned short msg_id,
+		       struct vxd_dev *ctx);
+
+int vxd_pvdec_pend_msg_info(const void *dev, void __iomem *reg_base,
+			    unsigned long *size, unsigned short *msg_id,
+			    unsigned char *not_last_msg);
+
+int vxd_pvdec_recv_msg(const void *dev, void __iomem *reg_base,
+		       unsigned int *buf, unsigned long buf_size, struct vxd_dev *ctx);
+
+int vxd_pvdec_check_fw_status(const void *dev, void __iomem *reg_base);
+
+unsigned long vxd_pvdec_peek_mtx_fifo(const void *dev,
+				      void __iomem *reg_base);
+
+unsigned long vxd_pvdec_read_mtx_fifo(const void *dev, void __iomem *reg_base,
+				      unsigned int *buf, unsigned long size);
+
+irqreturn_t vxd_pvdec_clear_int(void __iomem *reg_base, unsigned int *irq_status);
+
+int vxd_pvdec_check_irq(const void *dev, void __iomem *reg_base,
+			unsigned int irq_status);
+
+int vxd_pvdec_msg_fit(const void *dev, void __iomem *reg_base,
+		      unsigned long msg_size);
+
+void vxd_pvdec_get_state(const void *dev, void __iomem *reg_base,
+			 unsigned int num_pipes, struct vxd_hw_state *state);
+
+int vxd_pvdec_get_props(const void *dev, void __iomem *reg_base,
+			struct vxd_core_props *props);
+
+unsigned long vxd_pvdec_get_dbg_fifo_size(void __iomem *reg_base);
+
+int vxd_pvdec_dump_mtx_ram(const void *dev, void __iomem *reg_base,
+			   unsigned int addr, unsigned int count, unsigned int *buf);
+
+int vxd_pvdec_dump_mtx_status(const void *dev, void __iomem *reg_base,
+			      unsigned int *array, unsigned int array_size);
+
+#endif /* _VXD_PVDEC_PRIV_H */
diff --git a/drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h b/drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
new file mode 100644
index 000000000000..2d8cf9ef8df7
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
@@ -0,0 +1,779 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD PVDEC registers header file
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef VXD_PVDEC_REGS_H
+#define VXD_PVDEC_REGS_H
+
+/* ************************* VXD-specific values *************************** */
+/* 0x10 for code, 0x18 for data. */
+#define PVDEC_MTX_CORE_MEM 0x18
+/* Iteration timeout counter for MTX I/O. */
+#define PVDEC_TIMEOUT_COUNTER 1000
+/* Partial frame notification timer divider. */
+#define PVDEC_PFNT_DIV 0
+/* Value returned by register reads when HW enters invalid state (FPGA) */
+#define PVDEC_INVALID_HW_STATE 0x000dead1
+
+/* Default core clock for pvdec */
+#define PVDEC_CLK_MHZ_DEFAULT 200
+
+/* Offsets of registers groups within VXD. */
+#define PVDEC_PROC_OFFSET 0x0000
+/* 0x34c: Skip DMA registers when running against CSIM (virtual platform) */
+#define PVDEC_PROC_SIZE 0x34C  /* 0x3FF */
+
+#define PVDEC_CORE_OFFSET 0x0400
+#define PVDEC_CORE_SIZE 0x3FF
+
+#define MTX_CORE_OFFSET PVDEC_PROC_OFFSET
+#define MTX_CORE_SIZE PVDEC_PROC_SIZE
+
+#define VIDEO_BUS4_MMU_OFFSET 0x1000
+#define VIDEO_BUS4_MMU_SIZE 0x1FF
+
+#define IMG_VIDEO_BUS4_MMU_OFFSET VIDEO_BUS4_MMU_OFFSET
+#define IMG_VIDEO_BUS4_MMU_SIZE VIDEO_BUS4_MMU_SIZE
+
+#define VLR_OFFSET 0x2000
+#define VLR_SIZE 0x1000
+
+/* PVDEC_ENTROPY defined in uapi/vxd_pvdec.h */
+
+#define PVDEC_PIXEL_OFFSET 0x4000
+#define PVDEC_PIXEL_SIZE 0x1FF
+
+/* PVDEC_VEC_BE defined in uapi/vxd_pvdec.h */
+
+/* MSVDX_VEC defined in uapi/vxd_pvdec.h */
+
+#define MSVDX_VDMC_OFFSET 0x6800
+#define MSVDX_VDMC_SIZE 0x7F
+
+#define DMAC_OFFSET 0x6A00
+#define DMAC_SIZE 0x1FF
+
+#define PVDEC_TEST_OFFSET 0xFF00
+#define PVDEC_TEST_SIZE 0xFF
+
+/* *********************** firmware specific values ************************* */
+
+/* layout of COMMS RAM */
+
+#define PVDEC_FW_COMMS_HDR_SIZE 0x38
+
+#define PVDEC_FW_STATUS_OFFSET 0x00
+#define PVDEC_FW_TASK_STATUS_OFFSET 0x04
+#define PVDEC_FW_ID_OFFSET 0x08
+#define PVDEC_FW_MTXPC_OFFSET 0x0c
+#define PVDEC_FW_MSG_COUNTER_OFFSET 0x10
+#define PVDEC_FW_SIGNATURE_OFFSET 0x14
+#define PVDEC_FW_TO_HOST_BUF_CONF_OFFSET 0x18
+#define PVDEC_FW_TO_HOST_RD_IDX_OFFSET 0x1c
+#define PVDEC_FW_TO_HOST_WR_IDX_OFFSET 0x20
+#define PVDEC_FW_TO_MTX_BUF_CONF_OFFSET 0x24
+#define PVDEC_FW_TO_MTX_RD_IDX_OFFSET 0x28
+#define PVDEC_FW_FLAGS_OFFSET 0x2c
+#define PVDEC_FW_TO_MTX_WR_IDX_OFFSET 0x30
+#define PVDEC_FW_STATE_BUF_CFG_OFFSET 0x34
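+
+/*
+ * Note: the last header field sits at offset 0x34, so the header spans
+ * 0x34 + 4 = 0x38 bytes, matching PVDEC_FW_COMMS_HDR_SIZE above.
+ */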
+
+/* firmware status */
+
+#define PVDEC_FW_STATUS_PANIC  0x2
+#define PVDEC_FW_STATUS_ASSERT 0x3
+#define PVDEC_FW_STATUS_SO     0x8
+
+/* firmware flags */
+
+#define PVDEC_FWFLAG_BIG_TO_HOST_BUFFER 0x00000002
+#define PVDEC_FWFLAG_FORCE_FS_FLOW 0x00000004
+#define PVDEC_FWFLAG_DISABLE_WATCHDOGS 0x00000008
+#define PVDEC_FWFLAG_DISABLE_AUTONOMOUS_RESET 0x00000040
+#define PVDEC_FWFLAG_DISABLE_IDLE_GPIO 0x00002000
+#define PVDEC_FWFLAG_ENABLE_ERROR_CONCEALMENT 0x00100000
+#define PVDEC_FWFLAG_DISABLE_GENC_FLUSHING 0x00800000
+#define PVDEC_FWFLAG_FAKE_COMPLETION 0x20000000
+#define PVDEC_FWFLAG_DISABLE_COREWDT_TIMERS 0x01000000
+
+/* firmware message header */
+
+#define PVDEC_FW_DEVA_GENMSG_OFFSET 0
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_ID_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_GENMSG_MSG_ID_SHIFT 16
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_TYPE_MASK 0xFF00
+#define PVDEC_FW_DEVA_GENMSG_MSG_TYPE_SHIFT 8
+
+#define PVDEC_FW_DEVA_GENMSG_NOT_LAST_MSG_MASK 0x80
+#define PVDEC_FW_DEVA_GENMSG_NOT_LAST_MSG_SHIFT 7
+
+#define PVDEC_FW_DEVA_GENMSG_MSG_SIZE_MASK 0x7F
+#define PVDEC_FW_DEVA_GENMSG_MSG_SIZE_SHIFT 0
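+
+/*
+ * Example (illustrative): a header word of 0x12345607 decodes to
+ * msg_id 0x1234, msg_type 0x56, not_last_msg 0 and a msg_size field
+ * of 7.
+ */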
+
+/* firmware init message */
+
+#define PVDEC_FW_DEVA_INIT_MSG_WRDS 9
+
+#define PVDEC_FW_DEVA_INIT_RENDEC_ADDR0_OFFSET 0xC
+
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_OFFSET 0x10
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_RENDEC_SIZE0_SHIFT 0
+
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_OFFSET 0x14
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MAX_H_FOR_PIPE_WAIT_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MAX_H_FOR_PIPE_WAIT_SHIFT 16
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MIN_H_FOR_DUAL_PIPE_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_HEVC_CFG_MIN_H_FOR_DUAL_PIPE_SHIFT 0
+
+#define PVDEC_FW_DEVA_INIT_SIG_SELECT_OFFSET 0x18
+
+#define PVDEC_FW_DEVA_INIT_DBG_DELAYS_OFFSET 0x1C
+
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_OFFSET 0x20
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_MASK 0xFFFF0000
+#define PVDEC_FW_DEVA_INIT_PFNT_DIV_SHIFT 16
+
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_OFFSET 0x20
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_MASK 0xFFFF
+#define PVDEC_FW_DEVA_INIT_FWWDT_MS_SHIFT 0
+
+/* firmware message types */
+#define PVDEC_FW_MSG_TYPE_PADDING 0
+#define PVDEC_FW_MSG_TYPE_INIT 0x80
+
+/* miscellaneous */
+
+#define PVDEC_FW_READY_SIG 0xa5a5a5a5
+
+#define PVDEC_FW_COM_BUF_SIZE(cfg) ((cfg) & 0x0000ffff)
+#define PVDEC_FW_COM_BUF_OFF(cfg) (((cfg) & 0xffff0000) >> 16)
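+
+/*
+ * Example (illustrative): a cfg word of 0x00400100 gives a buffer
+ * offset of 0x40 and a size of 0x100.
+ */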
+
+/*
+ * Timer divider calculation macro.
+ * NOTE: The timer divider is only an 8-bit field,
+ * so we set it for a 2 MHz timer base to cover a wider
+ * range of core frequencies on real platforms (freq > 255 MHz).
+ */
+#define PVDEC_CALC_TIMER_DIV(val) (((val) - 1) / 2)
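+
+/*
+ * For example, with the default 200 MHz core clock,
+ * PVDEC_CALC_TIMER_DIV(200) = (200 - 1) / 2 = 99, which fits in the
+ * 8-bit divider field and yields the ~2 MHz timer base noted above.
+ */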
+
+#define MTX_CORE_STATUS_ELEMENTS 4
+
+#define PVDEC_CORE_MEMSTALLER_ELEMENTS 7
+
+/* ********************** PVDEC_CORE registers group ************************ */
+
+/* register PVDEC_SOFT_RESET */
+#define PVDEC_CORE_PVDEC_SOFT_RST_OFFSET 0x0000
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_PIXEL_PROC_SOFT_RST_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_PIXEL_PROC_SOFT_RST_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_ENTROPY_SOFT_RST_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_ENTROPY_SOFT_RST_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_MMU_SOFT_RST_MASK 0x00000002
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_MMU_SOFT_RST_SHIFT 1
+
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_SOFT_RST_MASK 0x00000001
+#define PVDEC_CORE_PVDEC_SOFT_RST_PVDEC_SOFT_RST_SHIFT 0
+
+/* register PVDEC_HOST_INTERRUPT_STATUS */
+#define PVDEC_CORE_PVDEC_INT_STAT_OFFSET 0x0010
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_SYS_WDT_MASK 0x10000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_SYS_WDT_SHIFT 28
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_PROC_IRQ_MASK 0x08000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_PROC_IRQ_SHIFT 27
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_PROC_IRQ_MASK 0x04000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_PROC_IRQ_SHIFT 26
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_HOST_IRQ_MASK 0x02000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_READ_TIMEOUT_HOST_IRQ_SHIFT 25
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_HOST_IRQ_MASK 0x01000000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_COMMAND_TIMEOUT_HOST_IRQ_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_GPIO_IRQ_MASK 0x00200000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_GPIO_IRQ_SHIFT 21
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_MASK 0x00100000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PROC_IRQ_SHIFT 20
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_MASK 0x00010000
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_MMU_FAULT_IRQ_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PIXEL_PROCESSING_IRQ_MASK 0x0000FF00
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_PIXEL_PROCESSING_IRQ_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_ENTROPY_PIPE_IRQ_MASK 0x000000FF
+#define PVDEC_CORE_PVDEC_INT_STAT_HOST_ENTROPY_PIPE_IRQ_SHIFT 0
+
+/* register PVDEC_INTERRUPT_CLEAR */
+#define PVDEC_CORE_PVDEC_INT_CLEAR_OFFSET 0x0014
+
+#define PVDEC_CORE_PVDEC_INT_CLEAR_IRQ_CLEAR_MASK 0xFFFF0000
+#define PVDEC_CORE_PVDEC_INT_CLEAR_IRQ_CLEAR_SHIFT 16
+
+/* register PVDEC_HOST_INTERRUPT_ENABLE */
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_OFFSET 0x0018
+
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_HOST_IRQ_ENABLE_MASK 0xFFFF0000
+#define PVDEC_CORE_PVDEC_HOST_INT_ENA_HOST_IRQ_ENABLE_SHIFT 16
+
+/* Register PVDEC_MAN_CLK_ENABLE */
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_OFFSET 0x0040
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PIXEL_PROC_MAN_CLK_ENA_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PIXEL_PROC_MAN_CLK_ENA_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_ENTROPY_PIPE_MAN_CLK_ENA_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_ENTROPY_PIPE_MAN_CLK_ENA_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_MEM_MAN_CLK_ENA_MASK 0x00000100
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_MEM_MAN_CLK_ENA_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PVDEC_REG_MAN_CLK_ENA_MASK 0x00000010
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PVDEC_REG_MAN_CLK_ENA_SHIFT 4
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PROC_MAN_CLK_ENA_MASK 0x00000002
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_PROC_MAN_CLK_ENA_SHIFT 1
+
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_CORE_MAN_CLK_ENA_MASK 0x00000001
+#define PVDEC_CORE_PVDEC_MAN_CLK_ENA_CORE_MAN_CLK_ENA_SHIFT 0
+
+/* register PVDEC_HOST_PIPE_SELECT */
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_OFFSET 0x0060
+
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_PIPE_SEL_MASK 0x0000000F
+#define PVDEC_CORE_PVDEC_HOST_PIPE_SELECT_PIPE_SEL_SHIFT 0
+
+/* register PROC_DEBUG */
+#define PVDEC_CORE_PROC_DEBUG_OFFSET 0x0100
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_LAST_RAM_BANK_SIZE_MASK 0xFF000000
+#define PVDEC_CORE_PROC_DEBUG_MTX_LAST_RAM_BANK_SIZE_SHIFT 24
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANK_SIZE_MASK 0x000F0000
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANK_SIZE_SHIFT 16
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANKS_MASK 0x00000F00
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_BANKS_SHIFT 8
+
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_NEW_REPRESENTATION_MASK 0x00000080
+#define PVDEC_CORE_PROC_DEBUG_MTX_RAM_NEW_REPRESENTATION_SHIFT 7
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_OUT_MASK 0x00000018
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_OUT_SHIFT 3
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_IS_SLAVE_MASK 0x00000004
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_IS_SLAVE_SHIFT 2
+
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_IN_MASK 0x00000003
+#define PVDEC_CORE_PROC_DEBUG_PROC_DBG_GPIO_IN_SHIFT 0
+
+/* register PROC_DMAC_CONTROL */
+#define PVDEC_CORE_PROC_DMAC_CONTROL_OFFSET 0x0104
+
+#define PVDEC_CORE_PROC_DMAC_CONTROL_BOOT_ON_DMA_CH0_MASK 0x80000000
+#define PVDEC_CORE_PROC_DMAC_CONTROL_BOOT_ON_DMA_CH0_SHIFT 31
+
+/* register PROC_DEBUG_FIFO */
+#define PVDEC_CORE_PROC_DBG_FIFO_OFFSET 0x0108
+
+#define PVDEC_CORE_PROC_DBG_FIFO_PROC_DBG_FIFO_MASK 0xFFFFFFFF
+#define PVDEC_CORE_PROC_DBG_FIFO_PROC_DBG_FIFO_SHIFT 0
+
+/* register PROC_DEBUG_FIFO_CTRL_0 */
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_OFFSET 0x010C
+
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_COUNT_MASK 0xFFFF0000
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_COUNT_SHIFT 16
+
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_SIZE_MASK 0x0000FFFF
+#define PVDEC_CORE_PROC_DBG_FIFO_CTRL0_PROC_DBG_FIFO_SIZE_SHIFT 0
+
+/* register PVDEC_CORE_ID */
+#define PVDEC_CORE_PVDEC_CORE_ID_OFFSET 0x0230
+
+#define PVDEC_CORE_PVDEC_CORE_ID_GROUP_ID_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_CORE_ID_GROUP_ID_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_CORE_ID_CORE_ID_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_CORE_ID_CORE_ID_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_CORE_ID_PVDEC_CORE_CONFIG_MASK 0x0000FFFF
+#define PVDEC_CORE_PVDEC_CORE_ID_PVDEC_CORE_CONFIG_SHIFT 0
+
+#define PVDEC_CORE_PVDEC_CORE_ID_ENT_PIPES_MASK 0x0000000F
+#define PVDEC_CORE_PVDEC_CORE_ID_ENT_PIPES_SHIFT 0
+
+#define PVDEC_CORE_PVDEC_CORE_ID_PIX_PIPES_MASK 0x000000F0
+#define PVDEC_CORE_PVDEC_CORE_ID_PIX_PIPES_SHIFT 4
+
+/* register PVDEC_CORE_REV */
+#define PVDEC_CORE_PVDEC_CORE_REV_OFFSET 0x0240
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_DESIGNER_MASK 0xFF000000
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_DESIGNER_SHIFT 24
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAJOR_REV_MASK 0x00FF0000
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAJOR_REV_SHIFT 16
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MINOR_REV_MASK 0x0000FF00
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MINOR_REV_SHIFT 8
+
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAINT_REV_MASK 0x000000FF
+#define PVDEC_CORE_PVDEC_CORE_REV_PVDEC_MAINT_REV_SHIFT 0
+
+/* *********************** MTX_CORE registers group ************************* */
+
+/* register MTX_ENABLE  */
+#define MTX_CORE_MTX_ENABLE_OFFSET 0x0000
+
+/* register MTX_SYSC_TXTIMER. Note: it's not defined in PVDEC TRM. */
+#define MTX_CORE_MTX_SYSC_TXTIMER_OFFSET 0x0010
+
+/* register MTX_KICKI */
+#define MTX_CORE_MTX_KICKI_OFFSET 0x0088
+
+#define MTX_CORE_MTX_KICKI_MTX_KICKI_MASK 0x0000FFFF
+#define MTX_CORE_MTX_KICKI_MTX_KICKI_SHIFT 0
+
+/* register MTX_FAULT0 */
+#define MTX_CORE_MTX_FAULT0_OFFSET 0x0090
+
+/* register MTX_REGISTER_READ_WRITE_DATA */
+#define MTX_CORE_MTX_REG_READ_WRITE_DATA_OFFSET 0x00F8
+
+/* register MTX_REGISTER_READ_WRITE_REQUEST */
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_OFFSET 0x00FC
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_DREADY_MASK 0x80000000
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_DREADY_SHIFT 31
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RNW_MASK 0x00010000
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RNW_SHIFT 16
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RSPECIFIER_MASK 0x00000070
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_RSPECIFIER_SHIFT 4
+
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_USPECIFIER_MASK 0x0000000F
+#define MTX_CORE_MTX_REG_READ_WRITE_REQUEST_MTX_USPECIFIER_SHIFT 0
+
+/* register MTX_RAM_ACCESS_DATA_EXCHANGE */
+#define MTX_CORE_MTX_RAM_ACCESS_DATA_EXCHANGE_OFFSET 0x0100
+
+/* register MTX_RAM_ACCESS_DATA_TRANSFER */
+#define MTX_CORE_MTX_RAM_ACCESS_DATA_TRANSFER_OFFSET 0x0104
+
+/* register MTX_RAM_ACCESS_CONTROL */
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_OFFSET 0x0108
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_MASK 0x0FF00000
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMID_SHIFT 20
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_MASK 0x000FFFFC
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCM_ADDR_SHIFT 2
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_MASK 0x00000002
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMAI_SHIFT 1
+
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_MASK 0x00000001
+#define MTX_CORE_MTX_RAM_ACCESS_CONTROL_MTX_MCMR_SHIFT 0
+
+/* register MTX_RAM_ACCESS_STATUS */
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_OFFSET 0x010C
+
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_MASK 0x00000001
+#define MTX_CORE_MTX_RAM_ACCESS_STATUS_MTX_MTX_MCM_STAT_SHIFT 0
+
+/* register MTX_SOFT_RESET */
+#define MTX_CORE_MTX_SOFT_RESET_OFFSET 0x0200
+
+#define MTX_CORE_MTX_SOFT_RESET_MTX_RESET_MASK 0x00000001
+#define MTX_CORE_MTX_SOFT_RESET_MTX_RESET_SHIFT 0
+
+/* register MTX_SYSC_TIMERDIV */
+#define MTX_CORE_MTX_SYSC_TIMERDIV_OFFSET 0x0208
+
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_EN_MASK 0x00010000
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_EN_SHIFT 16
+
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_DIV_MASK 0x000000FF
+#define MTX_CORE_MTX_SYSC_TIMERDIV_TIMER_DIV_SHIFT 0
+
+/* register MTX_SYSC_CDMAA */
+#define MTX_CORE_MTX_SYSC_CDMAA_OFFSET 0x0344
+
+#define MTX_CORE_MTX_SYSC_CDMAA_CDMAA_ADDRESS_MASK 0x03FFFFFC
+#define MTX_CORE_MTX_SYSC_CDMAA_CDMAA_ADDRESS_SHIFT 2
+
+/* register MTX_SYSC_CDMAC */
+#define MTX_CORE_MTX_SYSC_CDMAC_OFFSET 0x0340
+
+#define MTX_CORE_MTX_SYSC_CDMAC_BURSTSIZE_MASK 0x07000000
+#define MTX_CORE_MTX_SYSC_CDMAC_BURSTSIZE_SHIFT 24
+
+#define MTX_CORE_MTX_SYSC_CDMAC_RNW_MASK 0x00020000
+#define MTX_CORE_MTX_SYSC_CDMAC_RNW_SHIFT 17
+
+#define MTX_CORE_MTX_SYSC_CDMAC_ENABLE_MASK 0x00010000
+#define MTX_CORE_MTX_SYSC_CDMAC_ENABLE_SHIFT 16
+
+#define MTX_CORE_MTX_SYSC_CDMAC_LENGTH_MASK 0x0000FFFF
+#define MTX_CORE_MTX_SYSC_CDMAC_LENGTH_SHIFT 0
+
+/* register MTX_SYSC_CDMAT */
+#define MTX_CORE_MTX_SYSC_CDMAT_OFFSET 0x0350
+
+/* ****************** IMG_VIDEO_BUS4_MMU registers group ******************** */
+
+/* register MMU_CONTROL0_ */
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_USE_TILE_STRIDE_PER_CTX_MASK 0x00010000
+#define IMG_VIDEO_BUS4_MMU_MMU_CONTROL0_USE_TILE_STRIDE_PER_CTX_SHIFT 16
+
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_ENA_EXT_ADDR_MASK 0x00000010
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_MMU_ENA_EXT_ADDR_SHIFT 4
+
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_UPPER_ADDR_FIXED_MASK 0x00FF0000
+#define IMG_VIDEO_BUS4_MMU_MMU_ADDRESS_CONTROL_UPPER_ADDR_FIXED_SHIFT 16
+
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_EXT_OUTSTANDING_READ_WORDS_MASK 0x0000FFFF
+#define IMG_VIDEO_BUS4_MMU_MMU_MEM_EXT_OUTSTANDING_READ_WORDS_SHIFT 0
+
+/* *************************** MMU-related values ************************** */
+
+/* MMU page size */
+
+enum {
+	VXD_MMU_SOFT_PAGE_SIZE_PAGE_64K    = 0x4,
+	VXD_MMU_SOFT_PAGE_SIZE_PAGE_16K    = 0x2,
+	VXD_MMU_SOFT_PAGE_SIZE_PAGE_4K     = 0x0,
+	VXD_MMU_SOFT_PAGE_SIZE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* MMU PTD entry flags */
+enum {
+	VXD_MMU_PTD_FLAG_NONE            = 0x0,
+	VXD_MMU_PTD_FLAG_VALID           = 0x1,
+	VXD_MMU_PTD_FLAG_WRITE_ONLY      = 0x2,
+	VXD_MMU_PTD_FLAG_READ_ONLY       = 0x4,
+	VXD_MMU_PTD_FLAG_CACHE_COHERENCY = 0x8,
+	VXD_MMU_PTD_FLAG_FORCE32BITS     = 0x7FFFFFFFU
+};
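+
+/*
+ * For example, vxd_create_ctx() in vxd_core.c maps the firmware blob
+ * with VXD_MMU_PTD_FLAG_READ_ONLY and the rendec buffer with
+ * VXD_MMU_PTD_FLAG_NONE.
+ */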
+
+/* ********************* PVDEC_PIXEL registers group *********************** */
+
+/* register PVDEC_PIXEL_PIXEL_CONTROL_0 */
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_OFFSET 0x0004
+
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_DMAC_CH_SEL_FOR_MTX_MASK 0x0000000E
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_DMAC_CH_SEL_FOR_MTX_SHIFT 1
+
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_PROC_DMAC_CH0_SEL_MASK 0x00000001
+#define PVDEC_PIXEL_PIXEL_CONTROL_0_PROC_DMAC_CH0_SEL_SHIFT 0
+
+/* register PVDEC_PIXEL_MAN_CLK_ENABLE */
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_OFFSET 0x0020
+
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_REG_MAN_CLK_ENA_MASK 0x00020000
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_REG_MAN_CLK_ENA_SHIFT 17
+
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_DMAC_MAN_CLK_ENA_MASK 0x00010000
+#define PVDEC_PIXEL_PIXEL_MAN_CLK_ENA_PIXEL_DMAC_MAN_CLK_ENA_SHIFT 16
+
+/* register PIXEL_PIPE_CONFIG */
+#define PVDEC_PIXEL_PIXEL_PIPE_CONFIG_OFFSET 0x00C0
+
+/* register PIXEL_MISC_CONFIG */
+#define PVDEC_PIXEL_PIXEL_MISC_CONFIG_OFFSET 0x00C4
+
+/* register MAX_FRAME_CONFIG */
+#define PVDEC_PIXEL_MAX_FRAME_CONFIG_OFFSET 0x00C8
+
+/* ********************* PVDEC_ENTROPY registers group ********************* */
+
+/* Register PVDEC_ENTROPY_MAN_CLK_ENABLE */
+#define PVDEC_ENTROPY_ENTROPY_MAN_CLK_ENA_OFFSET 0x0020
+
+/* Register PVDEC_ENTROPY_LAST_LAST_MB */
+#define PVDEC_ENTROPY_ENTROPY_LAST_MB_OFFSET 0x00BC
+
+/* ********************* PVDEC_VEC_BE registers group ********************** */
+
+/* Register PVDEC_VEC_BE_VEC_BE_STATUS */
+#define PVDEC_VEC_BE_VEC_BE_STATUS_OFFSET 0x0018
+
+/* ********************* MSVDX_VEC registers group ************************* */
+
+/* Register MSVDX_VEC_VEC_ENTDEC_INFORMATION */
+#define MSVDX_VEC_VEC_ENTDEC_INFORMATION_OFFSET 0x00AC
+
+/* ********************* MSVDX_VDMC registers group ************************ */
+
+/* Register MSVDX_VDMC_VDMC_MACROBLOCK_NUMBER */
+#define MSVDX_VDMC_VDMC_MACROBLOCK_NUMBER_OFFSET 0x0048
+
+/* ************************** DMAC registers group ************************* */
+
+/* register DMAC_SETUP */
+#define DMAC_DMAC_SETUP_OFFSET 0x0000
+#define DMAC_DMAC_SETUP_STRIDE 32
+#define DMAC_DMAC_SETUP_NO_ENTRIES 6
+
+/* register DMAC_COUNT */
+#define DMAC_DMAC_COUNT_OFFSET 0x0004
+#define DMAC_DMAC_COUNT_STRIDE 32
+#define DMAC_DMAC_COUNT_NO_ENTRIES 6
+
+#define DMAC_DMAC_COUNT_LIST_IEN_MASK 0x80000000
+#define DMAC_DMAC_COUNT_LIST_IEN_SHIFT 31
+
+#define DMAC_DMAC_COUNT_BSWAP_MASK 0x40000000
+#define DMAC_DMAC_COUNT_BSWAP_SHIFT 30
+
+#define DMAC_DMAC_COUNT_TRANSFER_IEN_MASK 0x20000000
+#define DMAC_DMAC_COUNT_TRANSFER_IEN_SHIFT 29
+
+#define DMAC_DMAC_COUNT_PW_MASK 0x18000000
+#define DMAC_DMAC_COUNT_PW_SHIFT 27
+
+#define DMAC_DMAC_COUNT_DIR_MASK 0x04000000
+#define DMAC_DMAC_COUNT_DIR_SHIFT 26
+
+#define DMAC_DMAC_COUNT_PI_MASK 0x03000000
+#define DMAC_DMAC_COUNT_PI_SHIFT 24
+
+#define DMAC_DMAC_COUNT_LIST_FIN_CTL_MASK 0x00400000
+#define DMAC_DMAC_COUNT_LIST_FIN_CTL_SHIFT 22
+
+#define DMAC_DMAC_COUNT_DREQ_MASK 0x00100000
+#define DMAC_DMAC_COUNT_DREQ_SHIFT 20
+
+#define DMAC_DMAC_COUNT_SRST_MASK 0x00080000
+#define DMAC_DMAC_COUNT_SRST_SHIFT 19
+
+#define DMAC_DMAC_COUNT_LIST_EN_MASK 0x00040000
+#define DMAC_DMAC_COUNT_LIST_EN_SHIFT 18
+
+#define DMAC_DMAC_COUNT_ENABLE_2D_MODE_MASK 0x00020000
+#define DMAC_DMAC_COUNT_ENABLE_2D_MODE_SHIFT 17
+
+#define DMAC_DMAC_COUNT_EN_MASK 0x00010000
+#define DMAC_DMAC_COUNT_EN_SHIFT 16
+
+#define DMAC_DMAC_COUNT_CNT_MASK 0x0000FFFF
+#define DMAC_DMAC_COUNT_CNT_SHIFT 0
+
+/* register DMAC_PERIPH */
+#define DMAC_DMAC_PERIPH_OFFSET 0x0008
+#define DMAC_DMAC_PERIPH_STRIDE 32
+#define DMAC_DMAC_PERIPH_NO_ENTRIES 6
+
+#define DMAC_DMAC_PERIPH_ACC_DEL_MASK 0xE0000000
+#define DMAC_DMAC_PERIPH_ACC_DEL_SHIFT 29
+
+#define DMAC_DMAC_PERIPH_INCR_MASK 0x08000000
+#define DMAC_DMAC_PERIPH_INCR_SHIFT 27
+
+#define DMAC_DMAC_PERIPH_BURST_MASK 0x07000000
+#define DMAC_DMAC_PERIPH_BURST_SHIFT 24
+
+#define DMAC_DMAC_PERIPH_EXT_BURST_MASK 0x000F0000
+#define DMAC_DMAC_PERIPH_EXT_BURST_SHIFT 16
+
+#define DMAC_DMAC_PERIPH_EXT_SA_MASK 0x0000000F
+#define DMAC_DMAC_PERIPH_EXT_SA_SHIFT 0
+
+/* register DMAC_IRQ_STAT */
+#define DMAC_DMAC_IRQ_STAT_OFFSET 0x000C
+#define DMAC_DMAC_IRQ_STAT_STRIDE 32
+#define DMAC_DMAC_IRQ_STAT_NO_ENTRIES 6
+
+/* register DMAC_PERIPHERAL_ADDR */
+#define DMAC_DMAC_PERIPH_ADDR_OFFSET 0x0014
+#define DMAC_DMAC_PERIPH_ADDR_STRIDE 32
+#define DMAC_DMAC_PERIPH_ADDR_NO_ENTRIES 6
+
+#define DMAC_DMAC_PERIPH_ADDR_ADDR_MASK 0x007FFFFF
+#define DMAC_DMAC_PERIPH_ADDR_ADDR_SHIFT 0
+
+/* register DMAC_PER_HOLD */
+#define DMAC_DMAC_PER_HOLD_OFFSET 0x0018
+#define DMAC_DMAC_PER_HOLD_STRIDE 32
+#define DMAC_DMAC_PER_HOLD_NO_ENTRIES 6
+
+#define DMAC_DMAC_PER_HOLD_PER_HOLD_MASK 0x0000001F
+#define DMAC_DMAC_PER_HOLD_PER_HOLD_SHIFT 0
+
+#define DMAC_DMAC_SOFT_RESET_OFFSET 0x00C0
+
+/* ************************** DMAC-related values *************************** */
+
+/*
+ * This type defines whether the peripheral address is static or
+ * auto-incremented. (see the TRM "Transfer Sequence Linked-list - INCR")
+ */
+enum {
+	DMAC_INCR_OFF         = 0, /* No action, no increment. */
+	DMAC_INCR_ON          = 1, /* Generate address increment. */
+	DMAC_INCR_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Burst size settings (see the TRM "Transfer Sequence Linked-list - BURST"). */
+enum {
+	DMAC_BURST_0           = 0x0, /* burst size of 0 */
+	DMAC_BURST_1           = 0x1, /* burst size of 1 */
+	DMAC_BURST_2           = 0x2, /* burst size of 2 */
+	DMAC_BURST_3           = 0x3, /* burst size of 3 */
+	DMAC_BURST_4           = 0x4, /* burst size of 4 */
+	DMAC_BURST_5           = 0x5, /* burst size of 5 */
+	DMAC_BURST_6           = 0x6, /* burst size of 6 */
+	DMAC_BURST_7           = 0x7, /* burst size of 7 */
+	DMAC_BURST_8           = 0x8, /* burst size of 8 */
+	DMAC_BURST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Extended burst size settings (see TRM "Transfer Sequence Linked-list -
+ * EXT_BURST").
+ */
+enum {
+	DMAC_EXT_BURST_0           = 0x0, /* no extension */
+	DMAC_EXT_BURST_1           = 0x1, /* extension of 8 */
+	DMAC_EXT_BURST_2           = 0x2, /* extension of 16 */
+	DMAC_EXT_BURST_3           = 0x3, /* extension of 24 */
+	DMAC_EXT_BURST_4           = 0x4, /* extension of 32 */
+	DMAC_EXT_BURST_5           = 0x5, /* extension of 40 */
+	DMAC_EXT_BURST_6           = 0x6, /* extension of 48 */
+	DMAC_EXT_BURST_7           = 0x7, /* extension of 56 */
+	DMAC_EXT_BURST_8           = 0x8, /* extension of 64 */
+	DMAC_EXT_BURST_9           = 0x9, /* extension of 72 */
+	DMAC_EXT_BURST_10          = 0xa, /* extension of 80 */
+	DMAC_EXT_BURST_11          = 0xb, /* extension of 88 */
+	DMAC_EXT_BURST_12          = 0xc, /* extension of 96 */
+	DMAC_EXT_BURST_13          = 0xd, /* extension of 104 */
+	DMAC_EXT_BURST_14          = 0xe, /* extension of 112 */
+	DMAC_EXT_BURST_15          = 0xf, /* extension of 120 */
+	DMAC_EXT_BURST_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Transfer direction. */
+enum {
+	DMAC_MEM_TO_VXD         = 0x0,
+	DMAC_VXD_TO_MEM         = 0x1,
+	DMAC_VXD_TO_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* How much to increment the peripheral address. */
+enum {
+	DMAC_PI_1           = 0x2, /* increment by 1 */
+	DMAC_PI_2           = 0x1, /* increment by 2 */
+	DMAC_PI_4           = 0x0, /* increment by 4 */
+	DMAC_PI_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Peripheral width settings (see TRM "Transfer Sequence Linked-list - PW"). */
+enum {
+	DMAC_PWIDTH_32_BIT      = 0x0, /* Peripheral width 32-bit. */
+	DMAC_PWIDTH_16_BIT      = 0x1, /* Peripheral width 16-bit. */
+	DMAC_PWIDTH_8_BIT       = 0x2, /* Peripheral width 8-bit. */
+	DMAC_PWIDTH_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* ******************************* macros ********************************** */
+
+#ifdef PVDEC_SINGLETHREADED_IO
+/* Write to the register */
+#define VXD_WR_REG_ABS(base, addr, val) \
+	({ spin_lock_irqsave(&pvdec_irq_lock, pvdec_irq_flags); \
+	   iowrite32((val), (addr) + (base)); \
+	   spin_unlock_irqrestore(&pvdec_irq_lock, (unsigned long)pvdec_irq_flags); })
+
+/* Read the register */
+#define VXD_RD_REG_ABS(base, addr) \
+	({ unsigned int reg; \
+	   spin_lock_irqsave(&pvdec_irq_lock, pvdec_irq_flags); \
+	   reg = ioread32((addr) + (base)); \
+	   spin_unlock_irqrestore(&pvdec_irq_lock, (unsigned long)pvdec_irq_flags); \
+	   reg; })
+#else /* ndef PVDEC_SINGLETHREADED_IO */
+
+/* Write to the register */
+#define VXD_WR_REG_ABS(base, addr, val) \
+	(iowrite32((val), (addr) + (base)))
+
+/* Read the register */
+#define VXD_RD_REG_ABS(base, addr) \
+	(ioread32((addr) + (base)))
+
+#endif
+
+/* Get offset of a register */
+#define VXD_GET_REG_OFF(group, reg) \
+	(group ## _OFFSET + group ## _ ## reg ## _OFFSET)
+
+/* Get offset of a repeated register */
+#define VXD_GET_RPT_REG_OFF(group, reg, index) \
+	(VXD_GET_REG_OFF(group, reg) + ((index) * group ## _ ## reg ## _STRIDE))
+
+/* Extract field from a register */
+#define VXD_RD_REG_FIELD(val, group, reg, field) \
+	(((val) & group ## _ ## reg ## _ ## field ## _MASK) >> \
+	 group ## _ ## reg ## _ ## field ## _SHIFT)
+
+/* Shift provided value by number of bits relevant to register specification */
+#define VXD_ENC_REG_FIELD(group, reg, field, val) \
+	((unsigned int)(val) << (group ## _ ## reg ## _ ## field ## _SHIFT))
+
+/* Update the field in a register */
+#define VXD_WR_REG_FIELD(reg_val, group, reg, field, val) \
+	(((reg_val) & ~(group ## _ ## reg ## _ ## field ## _MASK)) | \
+	 (VXD_ENC_REG_FIELD(group, reg, field, val) & \
+	  (group ## _ ## reg ## _ ## field ## _MASK)))
+
+/* Write to a register */
+#define VXD_WR_REG(base, group, reg, val) \
+	VXD_WR_REG_ABS(base, VXD_GET_REG_OFF(group, reg), val)
+
+/* Write to a repeated register */
+#define VXD_WR_RPT_REG(base, group, reg, val, index) \
+	VXD_WR_REG_ABS(base, VXD_GET_RPT_REG_OFF(group, reg, index), val)
+
+/* Read a register */
+#define VXD_RD_REG(base, group, reg) \
+	VXD_RD_REG_ABS(base, VXD_GET_REG_OFF(group, reg))
+
+/* Read a repeated register */
+#define VXD_RD_RPT_REG(base, group, reg, index) \
+	VXD_RD_REG_ABS(base, VXD_GET_RPT_REG_OFF(group, reg, index))
+
+/* Insert word into the message buffer */
+#define VXD_WR_MSG_WRD(buf, msg_type, wrd, val) \
+	(((unsigned int *)buf)[(msg_type ## _ ## wrd ## _OFFSET) / sizeof(unsigned int)] = \
+		 val)
+
+/* Get a word from the message buffer */
+#define VXD_RD_MSG_WRD(buf, msg_type, wrd) \
+	(((unsigned int *)buf)[(msg_type ## _ ## wrd ## _OFFSET) / sizeof(unsigned int)])
+
+/* Get offset for pipe register */
+#define VXD_GET_PIPE_OFF(num_pipes, pipe) \
+	((num_pipes) > 1 ? ((pipe) << 16) : 0)
+
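+/*
+ * Usage sketch (illustrative only, not part of the driver interface).
+ * Assuming <base> is the ioremapped register base, the helpers above
+ * compose as follows:
+ *
+ *	unsigned int id, pipes;
+ *
+ *	id = VXD_RD_REG(base, PVDEC_CORE, PVDEC_CORE_ID);
+ *	pipes = VXD_RD_REG_FIELD(id, PVDEC_CORE, PVDEC_CORE_ID, PIX_PIPES);
+ *
+ *	id = VXD_RD_REG(base, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT);
+ *	id = VXD_WR_REG_FIELD(id, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT,
+ *			      PIPE_SEL, 1);
+ *	VXD_WR_REG(base, PVDEC_CORE, PVDEC_HOST_PIPE_SELECT, id);
+ */
+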
+#endif /* VXD_PVDEC_REGS_H */
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 05/30] v4l: vxd-dec: Add IMG VXD Video Decoder mem to mem driver
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (3 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 04/30] v4l: vxd-dec: Add vxd " sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 06/30] v4l: vxd-dec: Add hardware control modules sidraya.bj
                   ` (26 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

The IMG VXD Video Decoder uses the IMG D5520 to provide video
decoding for various codecs including H.264 and HEVC.
Scaling and rotation are also supported by the hardware
and driver. The driver also supports multiple simultaneous
video decodes.

Each mem2mem context is a single stream decode session.
Each session creates its own vxd context and associated
mem mgr and mmu contexts. Firmware loading, firmware messaging,
and hardware power management (reset) are supported, as well as MMU
programming of the HW.

This patch adds the framework for the v4l2 IMG VXD video decoder
driver, supporting HW initialization, MMU mapping and buffer management.
The decoding functionality is not yet implemented.

Signed-off-by: Buddy Liong <buddy.liong@ti.com>
Signed-off-by: Angela Stegmaier <angelabaker@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                  |    3 +
 drivers/staging/media/vxd/decoder/vxd_core.c | 1683 ++++++++++++++++++
 drivers/staging/media/vxd/decoder/vxd_dec.c  |  185 ++
 drivers/staging/media/vxd/decoder/vxd_dec.h  |  477 +++++
 4 files changed, 2348 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_core.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_dec.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_dec.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 0f8154b69a91..47067f907539 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19543,6 +19543,9 @@ F:	drivers/staging/media/vxd/common/img_mem_unified.c
 F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
+F:	drivers/staging/media/vxd/decoder/vxd_core.c
+F:	drivers/staging/media/vxd/decoder/vxd_dec.c
+F:	drivers/staging/media/vxd/decoder/vxd_dec.h
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec.c
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
diff --git a/drivers/staging/media/vxd/decoder/vxd_core.c b/drivers/staging/media/vxd/decoder/vxd_core.c
new file mode 100644
index 000000000000..b502c33e6456
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_core.c
@@ -0,0 +1,1683 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC VXD Core component function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/time64.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_dec_common.h"
+#include "vxd_pvdec_priv.h"
+
+#define VXD_RENDEC_SIZE (5 * 1024 * 1024)
+
+#define VXD_MSG_CNT_SHIFT 8
+#define VXD_MSG_CNT_MASK 0xff00
+#define VXD_MAX_MSG_CNT ((1 << VXD_MSG_CNT_SHIFT) - 1)
+#define VXD_MSG_STR_MASK 0xff
+#define VXD_INVALID_ID (-1)
+
+#define MAP_FIRMWARE_TO_STREAM 1
+
+/* Has to be used with VXD->mutex acquired! */
+#define VXD_GEN_MSG_ID(VXD, STR_ID, MSG_ID, vxd_type, str_type) \
+	do { \
+		vxd_type __VXD = VXD; \
+		str_type __STR_ID = STR_ID; \
+		WARN_ON((__STR_ID) > VXD_MSG_STR_MASK); \
+		(__VXD)->msg_cnt = ((__VXD)->msg_cnt + 1) % (VXD_MAX_MSG_CNT); \
+		(MSG_ID) = ((__VXD)->msg_cnt << VXD_MSG_CNT_SHIFT) | \
+			((__STR_ID) & VXD_MSG_STR_MASK); \
+	} while (0)
+
+/* Has to be used with VXD->mutex acquired! */
+#define VXD_RET_MSG_ID(VXD) ((VXD)->msg_cnt--)
+
+#define VXD_MSG_ID_GET_STR_ID(MSG_ID) \
+	((MSG_ID) & VXD_MSG_STR_MASK)
+
+#define VXD_MSG_ID_GET_CNT(MSG_ID) \
+	(((MSG_ID) & VXD_MSG_CNT_MASK) >> VXD_MSG_CNT_SHIFT)
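+
+/*
+ * Example (illustrative): with msg_cnt 0x12 and stream id 0x34,
+ * VXD_GEN_MSG_ID() produces (0x12 << 8) | 0x34 = 0x1234;
+ * VXD_MSG_ID_GET_STR_ID() and VXD_MSG_ID_GET_CNT() recover 0x34
+ * and 0x12 respectively.
+ */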
+
+static const unsigned char *drv_fw_name = "pvdec_full_bin.fw";
+
+/* Driver context */
+static struct {
+	/* Available memory heaps. List of <struct vxd_heap> */
+	struct list_head heaps;
+	/* heap id for all internal allocations (rendec, firmware) */
+	int internal_heap_id;
+
+	/* Memory Management context for driver */
+	struct mem_ctx *mem_ctx;
+
+	/* List of associated <struct vxd_dev> */
+	struct list_head devices;
+
+	/* Virtual addresses of shared buffers, common for all streams. */
+	struct {
+		unsigned int fw_addr; /* Firmware blob */
+		unsigned int rendec_addr; /* Rendec buffer */
+	} virt_space;
+
+	int initialised;
+} vxd_drv;
+
+/*
+ * struct vxd_heap - node for heaps list
+ * @id:   heap id
+ * @list: Entry in <struct vxd_drv:heaps>
+ */
+struct vxd_heap {
+	int id;
+	struct list_head list;
+};
+
+static void img_mmu_callback(enum mmu_callback_type callback_type,
+			     int buff_id, void *data)
+{
+	struct vxd_dev *vxd = data;
+
+	if (!vxd)
+		return;
+
+	if (callback_type == MMU_CALLBACK_MAP)
+		return;
+
+	if (vxd->hw_on)
+		vxd_pvdec_mmu_flush(vxd->dev, vxd->reg_base);
+}
+
+static int vxd_is_apm_required(struct vxd_dev *vxd)
+{
+	return vxd->hw_on;
+}
+
+/*
+ * Power on the HW.
+ * Call with vxd->mutex acquired.
+ */
+static int vxd_make_hw_on_locked(struct vxd_dev *vxd, unsigned int fw_ptd)
+{
+	unsigned int fw_size;
+	struct vxd_fw_hdr *fw_hdr;
+	struct vxd_ena_params ena_params;
+	int ret;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+	if (vxd->hw_on)
+		return 0;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: enabling HW\n", __func__);
+#endif
+
+	fw_size = vxd->firmware.fw_size;
+	fw_hdr = vxd->firmware.hdr;
+	if (!fw_size || !fw_hdr) {
+		dev_err(vxd->dev, "%s: firmware missing!\n", __func__);
+		return -ENOENT;
+	}
+
+	memset(&ena_params, 0, sizeof(struct vxd_ena_params));
+
+	ena_params.fw_buf_size = fw_size - sizeof(struct vxd_fw_hdr);
+	ena_params.fw_buf_virt_addr = vxd_drv.virt_space.fw_addr;
+	ena_params.ptd = fw_ptd;
+	ena_params.boot_poll.msleep_cycles = 50;
+	ena_params.crc = 0;
+	ena_params.rendec_addr = vxd_drv.virt_space.rendec_addr;
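+	/*
+	 * The rendec size is passed in 4K pages (see vxd_ena_params); for
+	 * example, with two pixel pipes this is (2 * 5 MiB) / 4096 = 2560
+	 * pages.
+	 */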
+	ena_params.rendec_size = (VXD_NUM_PIX_PIPES(vxd->props) *
+		VXD_RENDEC_SIZE) / 4096u;
+
+	ena_params.secure = 0;
+	ena_params.wait_dbg_fifo = 0;
+	ena_params.mem_staller.data = NULL;
+	ena_params.mem_staller.size = 0;
+
+	ret = vxd_pvdec_ena(vxd->dev, vxd->reg_base, &ena_params,
+			    fw_hdr, &vxd->freq_khz);
+	/*
+	 * Ignore the return code, proceed as usual, it will be returned anyway.
+	 * The HW is turned on, so we can perform post mortem analysis,
+	 * and collect the fw logs when available.
+	 */
+
+	vxd->hw_on = 1;
+
+	return ret;
+}
+
+/*
+ * Power off the HW.
+ * Call with vxd->mutex acquired.
+ */
+static void vxd_make_hw_off_locked(struct vxd_dev *vxd, unsigned char suspending)
+{
+	int ret;
+
+	if (!vxd->hw_on)
+		return;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+
+	ret = vxd_pvdec_dis(vxd->dev, vxd->reg_base);
+	vxd->hw_on = 0;
+	if (ret)
+		dev_err(vxd->dev, "%s: failed to power off the VXD!\n", __func__);
+}
+
+/*
+ * Moves all valid items from the queue of items being currently processed to
+ * the pending queue.
+ * Call with vxd->mutex locked
+ */
+static void vxd_rewind_msgs_locked(struct vxd_dev *vxd)
+{
+	struct vxd_item *item, *tmp;
+
+	if (list_empty(&vxd->msgs))
+		return;
+
+	list_for_each_entry_safe(item, tmp, &vxd->msgs, list)
+		list_move(&item->list, &vxd->pend);
+}
+
+static void vxd_report_item_locked(struct vxd_dev *vxd,
+				   struct vxd_item *item,
+				   unsigned int flags)
+{
+	struct vxd_stream *stream;
+
+	__list_del_entry(&item->list);
+	stream = idr_find(vxd->streams, item->stream_id);
+	if (!stream) {
+		/*
+		 * Failed to find associated stream. Probably it was
+		 * already destroyed -- drop the item
+		 */
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: drop item %p [0x%x]\n", __func__, item, item->msg_id);
+#endif
+		kfree(item);
+	} else {
+		item->msg.out_flags |= flags;
+		list_add_tail(&item->list, &stream->ctx->items_done);
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: waking %p\n", __func__, stream->ctx);
+
+		dev_info(vxd->dev, "%s: signaling worker for %p\n", __func__, stream->ctx);
+#endif
+		schedule_work(stream->ctx->work);
+	}
+}
+
+/*
+ * Rewind all items to the pending queue and report those to listener.
+ * Postpone the reset.
+ * Call with vxd->mutex acquired.
+ */
+static void vxd_emrg_reset_locked(struct vxd_dev *vxd, unsigned int flags)
+{
+	cancel_delayed_work(vxd->dwork);
+
+	vxd->emergency = 1;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+	if (disable_fw_irq_value != 0) {
+		/*
+		 * Previously we disabled the IRQ; now enable it. This
+		 * condition occurs only when firmware non-responsiveness is
+		 * detected in the vxd_worker thread. Once we reproduce the
+		 * issue, we enable the IRQ so that the code flow continues.
+		 */
+		enable_irq(g_module_irq);
+	}
+#endif
+
+	/*
+	 * If the firmware sends more than one reply per item, it's possible
+	 * that corresponding item was already removed from vxd-msgs, but the
+	 * HW was still processing it and MMU page fault could happen and
+	 * trigger execution of this function. So make sure that vxd->msgs
+	 * is not empty before rewinding items.
+	 */
+	if (!list_empty(&vxd->msgs))
+		/* Move all valid items to the pending queue */
+		vxd_rewind_msgs_locked(vxd);
+
+	{
+		struct vxd_item *item, *tmp;
+
+		list_for_each_entry_safe(item, tmp, &vxd->pend, list) {
+			/*
+			 * Exclusive items that were on the pending list
+			 * must be reported as canceled
+			 */
+			if ((item->msg.out_flags & VXD_FW_MSG_FLAG_EXCL) && !item->msg_id)
+				item->msg.out_flags |= VXD_FW_MSG_FLAG_CANCELED;
+
+			vxd_report_item_locked(vxd, item, flags);
+		}
+	}
+}
+
+static void vxd_handle_io_error_locked(struct vxd_dev *vxd)
+{
+	struct vxd_item *item, *tmp;
+	unsigned int pend_flags = !vxd->hw_on ? VXD_FW_MSG_FLAG_DEV_ERR :
+		VXD_FW_MSG_FLAG_CANCELED;
+
+	list_for_each_entry_safe(item, tmp, &vxd->msgs, list)
+		vxd_report_item_locked(vxd, item, VXD_FW_MSG_FLAG_DEV_ERR);
+
+	list_for_each_entry_safe(item, tmp, &vxd->pend, list)
+		vxd_report_item_locked(vxd, item, pend_flags);
+}
+
+static void vxd_sched_worker_locked(struct vxd_dev *vxd, unsigned int delay_ms)
+{
+	unsigned long long work_at = jiffies + msecs_to_jiffies(delay_ms);
+	int ret;
+
+	/*
+	 * Try to queue the work.
+	 * This may also be called from the worker context,
+	 * so we need to re-arm anyway in case of error
+	 */
+	ret = schedule_delayed_work(vxd->dwork, work_at - jiffies);
+	if (!ret) {
+		/* Work is already in the queue */
+		/*
+		 * Check if new requested time is "before"
+		 * the last "time" we scheduled this work at,
+		 * if not, do nothing, the worker will do
+		 * recalculation for APM/DWR afterwards
+		 */
+		if (time_before((unsigned long)work_at, (unsigned long)vxd->work_sched_at)) {
+			/*
+			 * Canceling & rescheduling might be problematic,
+			 * so just modify it, when needed
+			 */
+			ret = mod_delayed_work(system_wq, vxd->dwork, work_at - jiffies);
+			if (!ret)
+				dev_err(vxd->dev, "%s: failed to modify work!\n", __func__);
+			/*
+			 * Record the 'time' this work
+			 * has been rescheduled at
+			 */
+			vxd->work_sched_at = work_at;
+		}
+	} else {
+		/* Record the 'time' this work has been scheduled at */
+		vxd->work_sched_at = work_at;
+	}
+}
+
+static void vxd_monitor_locked(struct vxd_dev *vxd)
+{
+	/* HW is dead, not much sense in rescheduling */
+	if (vxd->hw_dead)
+		return;
+
+	/*
+	 * We are not processing anything, but the pending list is not empty;
+	 * probably the message FIFO is full, so retrigger the worker.
+	 */
+	if (!list_empty(&vxd->pend) && list_empty(&vxd->msgs))
+		vxd_sched_worker_locked(vxd, 1);
+
+	if (list_empty(&vxd->pend) && list_empty(&vxd->msgs) && vxd_is_apm_required(vxd)) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: scheduling APM work (%d ms)!\n", __func__, vxd->hw_pm_delay);
+#endif
+		/*
+		 * No items to process and no items being processed -
+		 * disable the HW
+		 */
+		vxd->pm_start = jiffies;
+		vxd_sched_worker_locked(vxd, vxd->hw_pm_delay);
+		return;
+	}
+
+	if (vxd->hw_dwr_period > 0 && !list_empty(&vxd->msgs)) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: scheduling DWR work (%d ms)!\n",
+			__func__, vxd->hw_dwr_period);
+#endif
+		vxd->dwr_start = jiffies;
+		vxd_sched_worker_locked(vxd, vxd->hw_dwr_period);
+	}
+}
+
+/*
+ * Take first item from pending list and submit it to the hardware.
+ * Has to be called with vxd->mutex locked.
+ */
+static int vxd_sched_single_locked(struct vxd_dev *vxd)
+{
+	struct vxd_item *item = NULL;
+	unsigned long msg_size;
+	int ret;
+
+	item = list_first_entry(&vxd->pend, struct vxd_item, list);
+
+	msg_size = item->msg.payload_size / sizeof(unsigned int);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: checking msg_size: %lu, item: %p\n", __func__, msg_size, item);
+#endif
+
+	/*
+	 * In case of exclusive item check if hw/fw is
+	 * currently processing anything.
+	 * If so we need to wait until items are returned back.
+	 */
+	if ((item->msg.out_flags & VXD_FW_MSG_FLAG_EXCL) && !list_empty(&vxd->msgs) &&
+	    /*
+	     * We can move forward if message
+	     * is about to be dropped.
+	     */
+	     !(item->msg.out_flags & VXD_FW_MSG_FLAG_DROP))
+
+		ret = -EBUSY;
+	else
+		/*
+		 * Check if there's enough space
+		 * in comms RAM to submit the message.
+		 */
+		ret = vxd_pvdec_msg_fit(vxd->dev, vxd->reg_base, msg_size);
+
+	if (ret == 0) {
+		unsigned short msg_id;
+
+		VXD_GEN_MSG_ID(vxd, item->stream_id, msg_id, struct vxd_dev*, unsigned int);
+
+		/* submit the message to the hardware */
+		ret = vxd_pvdec_send_msg(vxd->dev, vxd->reg_base,
+					 (unsigned int *)item->msg.payload, msg_size,
+					 msg_id, vxd);
+		if (ret) {
+			dev_err(vxd->dev, "%s: failed to send msg!\n", __func__);
+			VXD_RET_MSG_ID(vxd);
+		} else {
+			if (item->msg.out_flags & VXD_FW_MSG_FLAG_DROP) {
+				__list_del_entry(&item->list);
+				kfree(item);
+#ifdef DEBUG_DECODER_DRIVER
+				dev_dbg(vxd->dev, "%s: drop msg 0x%x! (user requested)\n",
+					__func__, msg_id);
+#endif
+			} else {
+				item->msg_id = msg_id;
+#ifdef DEBUG_DECODER_DRIVER
+				dev_dbg(vxd->dev,
+					"%s: moving item %p, id 0x%x to msgs\n",
+					__func__, item, item->msg_id);
+#endif
+				list_move(&item->list, &vxd->msgs);
+			}
+
+			vxd_monitor_locked(vxd);
+		}
+
+	} else if (ret == -EINVAL) {
+		dev_warn(vxd->dev, "%s: invalid msg!\n", __func__);
+		vxd_report_item_locked(vxd, item, VXD_FW_MSG_FLAG_INV);
+		/*
+		 * HW is ok, the message was invalid, so don't return an
+		 * error
+		 */
+		ret = 0;
+	} else if (ret == -EBUSY) {
+		/*
+		 * Not enough space. Message is already in the pending queue,
+		 * so it will be submitted once we've got space. Delayed work
+		 * might have been canceled (if we are currently processing
+		 * threaded irq), so make sure that DWR will trigger if it's
+		 * enabled.
+		 */
+		vxd_monitor_locked(vxd);
+	} else {
+		dev_err(vxd->dev, "%s: failed to check space for msg!\n", __func__);
+	}
+
+	return ret;
+}
+
+/*
+ * Take items from pending list and submit them to the hardware, if space is
+ * available in the ring buffer.
+ * Call with vxd->mutex locked
+ */
+static void vxd_schedule_locked(struct vxd_dev *vxd)
+{
+	unsigned char emergency = vxd->emergency;
+	int ret;
+
+	/* if HW is dead, inform the UM and skip */
+	if (vxd->hw_dead) {
+		vxd_handle_io_error_locked(vxd);
+		return;
+	}
+
+	if (!vxd->hw_on && !list_empty(&vxd->msgs))
+		dev_err(vxd->dev, "%s: msgs not empty when the HW is off!\n", __func__);
+
+	if (list_empty(&vxd->pend)) {
+		vxd_monitor_locked(vxd);
+		return;
+	}
+
+	/*
+	 * If the emergency routine was fired, the HW was left ON, so the UM
+	 * could do the post mortem analysis before submitting the next items.
+	 * Now we can switch off the hardware.
+	 */
+	if (emergency) {
+		vxd->emergency = 0;
+		vxd_make_hw_off_locked(vxd, FALSE);
+		usleep_range(1000, 2000);
+	}
+
+	/* Try to schedule */
+	ret = 0;
+	while (!list_empty(&vxd->pend) && ret == 0) {
+		struct vxd_item *item;
+		struct vxd_stream *stream;
+
+		item = list_first_entry(&vxd->pend, struct vxd_item, list);
+		stream = idr_find(vxd->streams, item->stream_id);
+
+		ret = vxd_make_hw_on_locked(vxd, stream->ptd);
+		if (ret) {
+			dev_err(vxd->dev, "%s: failed to start HW!\n", __func__);
+			vxd->hw_dead = 1;
+			vxd_handle_io_error_locked(vxd);
+			return;
+		}
+
+		ret = vxd_sched_single_locked(vxd);
+	}
+
+	if (ret != 0 && ret != -EBUSY) {
+		dev_err(vxd->dev, "%s: failed to schedule, emrg: %d!\n", __func__, emergency);
+		if (emergency) {
+			/*
+			 * Failed to schedule in the emergency mode --
+			 * there's no hope. Power off the HW, mark all
+			 * items as failed and return them.
+			 */
+			vxd_handle_io_error_locked(vxd);
+			return;
+		}
+		/* Let worker try to handle it */
+		vxd_sched_worker_locked(vxd, 0);
+	}
+}
+
+static void stream_worker(void *work)
+{
+	struct vxd_dec_ctx *ctx = NULL;
+	struct vxd_dev *vxd = NULL;
+	struct vxd_item *item;
+
+	work = get_work_buff(work, FALSE);
+	ctx = container_of(work, struct vxd_dec_ctx, work);
+	vxd = ctx->dev;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: got work for ctx %p\n", __func__, ctx);
+#endif
+
+	mutex_lock_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+
+	while (!list_empty(&ctx->items_done)) {
+		item = list_first_entry(&ctx->items_done, struct vxd_item, list);
+
+		item->msg.out_flags &= VXD_FW_MSG_RD_FLAGS_MASK;
+
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(vxd->dev, "%s: item: %p, payload_size: %d, flags: 0x%x\n",
+			 __func__, item, item->msg.payload_size,
+			 item->msg.out_flags);
+#endif
+
+		if (ctx->cb)
+			ctx->cb(ctx->res_str_id, item->msg.payload,
+				item->msg.payload_size, item->msg.out_flags);
+
+		__list_del_entry(&item->list);
+		kfree(item);
+	}
+	mutex_unlock(ctx->mutex);
+}
+
+int vxd_create_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx)
+{
+	int ret = 0;
+	unsigned int fw_load_retries = 2 * 1000;
+
+	while (!vxd->firmware.ready && fw_load_retries > 0) {
+		usleep_range(1000, 2000);
+		fw_load_retries--;
+	}
+	if (vxd->firmware.buf_id == 0) {
+		dev_err(vxd->dev, "%s: request fw not yet done!\n", __func__);
+		return -EAGAIN;
+	}
+
+	/* Create memory management context for HW buffers */
+	ret = img_mem_create_ctx(&ctx->mem_ctx);
+	if (ret) {
+		dev_err(vxd->dev, "%s: failed to create mem context (err:%d)!\n", __func__, ret);
+		return ret;
+	}
+
+	ret = img_mmu_ctx_create(vxd->dev, vxd->mmu_config_addr_width,
+				 ctx->mem_ctx, vxd_drv.internal_heap_id,
+				 img_mmu_callback, vxd, &ctx->mmu_ctx);
+	if (ret) {
+		dev_err(vxd->dev, "%s:%d: failed to create mmu ctx\n", __func__, __LINE__);
+		ret = -EPERM;
+		goto out_destroy_ctx;
+	}
+
+	ret = img_mmu_map(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id,
+			  vxd_drv.virt_space.fw_addr,
+			  VXD_MMU_PTD_FLAG_READ_ONLY);
+	if (ret) {
+		dev_err(vxd->dev, "%s:%d: failed to map firmware buffer\n", __func__, __LINE__);
+		ret = -EPERM;
+		goto out_destroy_mmu_ctx;
+	}
+
+	ret = img_mmu_map(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id,
+			  vxd_drv.virt_space.rendec_addr,
+			  VXD_MMU_PTD_FLAG_NONE);
+	if (ret) {
+		dev_err(vxd->dev, "%s:%d: failed to map rendec buffer\n", __func__, __LINE__);
+		ret = -EPERM;
+		goto out_unmap_fw;
+	}
+
+	ret = img_mmu_get_ptd(ctx->mmu_ctx, &ctx->ptd);
+	if (ret) {
+		dev_err(vxd->dev, "%s:%d: failed to get PTD\n", __func__, __LINE__);
+		ret = -EPERM;
+		goto out_unmap_rendec;
+	}
+
+	/* Load the fw - this turns the HW on */
+	ret = vxd_make_hw_on_locked(vxd, ctx->ptd);
+	if (ret) {
+		dev_err(vxd->dev, "%s:%d: failed to start HW\n", __func__, __LINE__);
+		ret = -EPERM;
+		vxd->hw_on = FALSE;
+		goto out_unmap_rendec;
+	}
+
+	init_work(&ctx->work, stream_worker, HWA_DECODER);
+	if (!ctx->work) {
+		ret = -ENOMEM;
+		goto out_unmap_rendec;
+	}
+
+	vxd->fw_refcnt++;
+
+	return ret;
+
+out_unmap_rendec:
+	img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id);
+out_unmap_fw:
+	img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id);
+
+out_destroy_mmu_ctx:
+	img_mmu_ctx_destroy(ctx->mmu_ctx);
+out_destroy_ctx:
+	img_mem_destroy_ctx(ctx->mem_ctx);
+	return ret;
+}
+
+void vxd_destroy_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx)
+{
+	vxd->fw_refcnt--;
+
+	flush_work(ctx->work);
+
+	img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->rendec_buf_id);
+
+	img_mmu_unmap(ctx->mmu_ctx, vxd->mem_ctx, vxd->firmware.buf_id);
+
+	img_mmu_ctx_destroy(ctx->mmu_ctx);
+
+	img_mem_destroy_ctx(ctx->mem_ctx);
+
+	if (vxd->fw_refcnt == 0) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(vxd->dev, "FW: put %s\n", drv_fw_name);
+#endif
+		/* Poke the monitor to finally switch off the hw, when needed */
+		vxd_monitor_locked(vxd);
+	}
+}
+
+/* Top half */
+irqreturn_t vxd_handle_irq(void *dev)
+{
+	struct vxd_dev *vxd = ((const struct device *)dev)->driver_data;
+	struct vxd_hw_state *hw_state = &vxd->state.hw_state;
+	int ret;
+
+	if (!vxd)
+		return IRQ_NONE;
+
+	ret = vxd_pvdec_clear_int(vxd->reg_base, &hw_state->irq_status);
+
+	if (!hw_state->irq_status || ret == IRQ_NONE)
+		dev_warn(dev, "Got spurious interrupt!\n");
+
+	return (irqreturn_t)ret;
+}
+
+static void vxd_drop_msg_locked(const struct vxd_dev *vxd)
+{
+	int ret;
+
+	ret = vxd_pvdec_recv_msg(vxd->dev, vxd->reg_base, NULL, 0, (struct vxd_dev *)vxd);
+	if (ret)
+		dev_warn(vxd->dev, "%s: failed to receive msg!\n", __func__);
+}
+
+#ifdef DEBUG_DECODER_DRIVER
+static void vxd_dbg_dump_msg(const void *dev, const unsigned char *func,
+			     const unsigned int *payload,
+			     unsigned long msg_size)
+{
+	unsigned int i;
+
+	for (i = 0; i < msg_size; i++)
+		dev_dbg(dev, "%s: msg %d: 0x%08x\n", func, i, payload[i]);
+}
+#endif
+
+static struct vxd_item *vxd_get_orphaned_item_locked(struct vxd_dev *vxd,
+						     unsigned short msg_id,
+						     unsigned long msg_size)
+{
+	struct vxd_stream *stream;
+	struct vxd_item *item;
+	unsigned short str_id = VXD_MSG_ID_GET_STR_ID(msg_id);
+
+	/* Try to find associated stream */
+	stream = idr_find(vxd->streams, str_id);
+	if (!stream) {
+		/* Failed to find associated stream. */
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: failed to find str_id: %u\n", __func__, str_id);
+#endif
+		return NULL;
+	}
+
+	item = kzalloc(sizeof(*item) + (msg_size * sizeof(unsigned int)), GFP_KERNEL);
+	if (!item)
+		return NULL;
+
+	item->msg.out_flags = 0;
+	item->stream_id = str_id;
+	item->msg.payload_size = msg_size * sizeof(unsigned int);
+	if (vxd_pvdec_recv_msg(vxd->dev, vxd->reg_base, item->msg.payload, msg_size, vxd)) {
+		dev_err(vxd->dev, "%s: failed to receive msg from VXD!\n", __func__);
+		item->msg.out_flags |= VXD_FW_MSG_FLAG_DEV_ERR;
+	}
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: item: %p str_id: %u\n", __func__, item, str_id);
+#endif
+	/*
+	 * Need to put this item on the vxd->msgs list.
+	 * It will be removed afterwards.
+	 */
+	list_add_tail(&item->list, &vxd->msgs);
+
+#ifdef DEBUG_DECODER_DRIVER
+	vxd_dbg_dump_msg(vxd->dev, __func__, item->msg.payload, msg_size);
+#endif
+
+	return item;
+}
+
+/*
+ * Fetch and process a single message from the MTX->host ring buffer.
+ * <no_more> parameter is used to indicate if there are more messages pending.
+ * <fatal> parameter indicates if there is some serious situation detected.
+ * Has to be called with vxd->mutex locked.
+ */
+static void vxd_handle_single_msg_locked(struct vxd_dev *vxd,
+					 unsigned char *no_more,
+					 unsigned char *fatal)
+{
+	int ret;
+	unsigned short msg_id, str_id;
+	unsigned long msg_size; /* size in dwords */
+	struct vxd_item *item = NULL, *tmp, *it;
+	struct vxd_stream *stream;
+	void *dev = vxd->dev;
+	unsigned char not_last_msg;
+
+	/* get the message size and id */
+	ret = vxd_pvdec_pend_msg_info(dev, vxd->reg_base, &msg_size, &msg_id,
+				      &not_last_msg);
+	if (ret) {
+		dev_err(dev, "%s: failed to get pending msg size!\n", __func__);
+		*no_more = TRUE; /* worker will handle the HW failure */
+		return;
+	}
+
+	if (msg_size == 0) {
+		*no_more = TRUE;
+		return;
+	}
+	*no_more = FALSE;
+
+	str_id = VXD_MSG_ID_GET_STR_ID(msg_id);
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: [msg] size: %zu, cnt: %u, str_id: %u, id: 0x%x\n",
+		__func__, msg_size, VXD_MSG_ID_GET_CNT(msg_id),
+		str_id, msg_id);
+	dev_dbg(dev, "%s: [msg] not last: %u\n", __func__, not_last_msg);
+#endif
+
+	cancel_delayed_work(vxd->dwork);
+
+	/* Find associated item */
+	list_for_each_entry_safe_reverse(it, tmp, &vxd->msgs, list) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(dev, "%s: checking item %p [0x%x] [des: %d]\n",
+			__func__, it, it->msg_id, it->destroy);
+#endif
+		if (it->msg_id == msg_id) {
+			item = it;
+			break;
+		}
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: found item %p [destroy: %d]\n",
+		__func__, item, item ? item->destroy : VXD_INVALID_ID);
+#endif
+
+	/* Find associated stream */
+	stream = idr_find(vxd->streams, str_id);
+	/*
+	 * Check for firmware condition in case
+	 * when unexpected item is received.
+	 */
+	if (!item && !stream && vxd_pvdec_check_fw_status(dev, vxd->reg_base)) {
+		struct vxd_item *orphan;
+		/*
+		 * Let's forward the fatal info to listeners first, relying
+		 * on the head of the msg queue.
+		 */
+		/* TODO: forward fatal info to all attached processes */
+		item = list_entry(vxd->msgs.prev, struct vxd_item, list);
+		orphan = vxd_get_orphaned_item_locked(vxd, item->msg_id, msg_size);
+		if (!orphan) {
+			dev_warn(dev, "%s: drop msg 0x%x! (no orphan)\n", __func__, item->msg_id);
+			vxd_drop_msg_locked(vxd);
+		}
+
+		*fatal = TRUE;
+		return;
+	}
+
+	if ((item && item->destroy) || !stream) {
+		/*
+		 * Item was marked for destruction or we failed to find
+		 * associated stream. Probably it was already destroyed --
+		 * just ignore the message.
+		 */
+		if (item) {
+			__list_del_entry(&item->list);
+			kfree(item);
+			item = NULL;
+		}
+		dev_warn(dev, "%s: drop msg 0x%x! (no owner)\n", __func__, msg_id);
+		vxd_drop_msg_locked(vxd);
+		return;
+	}
+
+	/* Remove item from vxd->msgs list */
+	if (item && item->msg_id == msg_id && !not_last_msg)
+		__list_del_entry(&item->list);
+
+	/*
+	 * If there's no such item on a <being processed> list, or the one
+	 * found is too small to fit the output, or it's not supposed to be
+	 * released, allocate a new one.
+	 */
+	if (!item || (msg_size * sizeof(unsigned int) > item->msg.payload_size) || not_last_msg) {
+		struct vxd_item *new_item;
+
+		new_item = kzalloc(sizeof(*new_item) +
+				(msg_size * sizeof(unsigned int)), GFP_KERNEL);
+		if (item) {
+			if (!new_item) {
+				/*
+				 * Failed to allocate new item. Mark item as
+				 * errored and continue best effort, provide
+				 * only part of the message to the userspace
+				 */
+				dev_err(dev, "%s: failed to alloc new item!\n", __func__);
+				msg_size = item->msg.payload_size / sizeof(unsigned int);
+				item->msg.out_flags |= VXD_FW_MSG_FLAG_DRV_ERR;
+			} else {
+				*new_item = *item;
+				/*
+				 * Do not free the old item if subsequent
+				 * messages are expected (it also wasn't
+				 * removed from the vxd->msgs list, so we are
+				 * not losing a pointer here).
+				 */
+				if (!not_last_msg)
+					kfree(item);
+				item = new_item;
+			}
+		} else {
+			if (!new_item) {
+				/*
+				 * We have no place to put the message, we have
+				 * to drop it
+				 */
+				dev_err(dev, "%s: drop msg 0x%08x! (no mem)\n", __func__, msg_id);
+				vxd_drop_msg_locked(vxd);
+				return;
+			}
+			/*
+			 * There was no corresponding item on the
+			 * <being processed> list and we've allocated
+			 * a new one. Initialize it
+			 */
+			new_item->msg.out_flags = 0;
+			new_item->stream_id = str_id;
+			item = new_item;
+		}
+	}
+	ret = vxd_pvdec_recv_msg(dev, vxd->reg_base, item->msg.payload, msg_size, vxd);
+	if (ret) {
+		dev_err(dev, "%s: failed to receive msg from VXD!\n", __func__);
+		item->msg.out_flags |= VXD_FW_MSG_FLAG_DEV_ERR;
+	}
+	item->msg.payload_size = msg_size * sizeof(unsigned int);
+
+#ifdef DEBUG_DECODER_DRIVER
+	vxd_dbg_dump_msg(dev, __func__, item->msg.payload, msg_size);
+
+	dev_dbg(dev, "%s: adding to done list, item: %p, msg_size: %zu\n",
+		__func__, item, msg_size);
+#endif
+	list_add_tail(&item->list, &stream->ctx->items_done);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_info(dev, "%s: signaling worker for %p\n", __func__, stream->ctx);
+#endif
+	schedule_work(stream->ctx->work);
+}
+
+/* Bottom half */
+irqreturn_t vxd_handle_thread_irq(void *dev)
+{
+	unsigned char no_more = FALSE;
+	unsigned char fatal = FALSE;
+	struct vxd_dev *vxd = ((const struct device *)dev)->driver_data;
+	struct vxd_hw_state *hw_state;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	if (!vxd)
+		return IRQ_NONE;
+
+	hw_state = &vxd->state.hw_state;
+
+	mutex_lock(vxd->mutex);
+
+	/* Spurious interrupt? */
+	if (unlikely(!vxd->hw_on || vxd->hw_dead)) {
+		ret = IRQ_NONE;
+		goto out_unlock;
+	}
+
+	/* Check for critical exception - only MMU faults for now */
+	if (vxd_pvdec_check_irq(dev, vxd->reg_base, hw_state->irq_status) < 0) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(vxd->dev, "device MMU fault: resetting!!!\n");
+#endif
+		vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_MMU_FAULT);
+		goto out_unlock;
+	}
+
+	/*
+	 * Single interrupt can correspond to multiple messages, handle them
+	 * all.
+	 */
+	while (!no_more)
+		vxd_handle_single_msg_locked(vxd, &no_more, &fatal);
+
+	if (fatal) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(vxd->dev, "fw fatal condition: resetting!!!\n");
+#endif
+		/* Try to recover ... */
+		vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_FATAL);
+	} else {
+		/* Try to submit items to the HW */
+		vxd_schedule_locked(vxd);
+	}
+
+out_unlock:
+	hw_state->irq_status = 0;
+	mutex_unlock(vxd->mutex);
+
+	return ret;
+}
+
+static void vxd_worker(void *work)
+{
+	struct vxd_dev *vxd = NULL;
+	struct vxd_hw_state state = { 0 };
+	struct vxd_item *item_tail;
+
+	work = get_delayed_work_buff(work, FALSE);
+	vxd = container_of(work, struct vxd_dev, dwork);
+	mutex_lock(vxd->mutex);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: jif: %lu, pm: %llu dwr: %llu\n", __func__,
+		jiffies, vxd->pm_start, vxd->dwr_start);
+#endif
+
+	/*
+	 * Disable the hardware if it has been idle for vxd->hw_pm_delay
+	 * milliseconds. Or simply leave the function without doing anything
+	 * if the HW is not supposed to be turned off.
+	 */
+	if (list_empty(&vxd->pend) && list_empty(&vxd->msgs)) {
+		if (vxd_is_apm_required(vxd)) {
+			unsigned long long dst = vxd->pm_start +
+				msecs_to_jiffies(vxd->hw_pm_delay);
+
+			if (time_is_before_eq_jiffies((unsigned long)dst)) {
+#ifdef DEBUG_DECODER_DRIVER
+				dev_dbg(vxd->dev, "%s: pm, power off\n", __func__);
+#endif
+				vxd_make_hw_off_locked(vxd, FALSE);
+			} else {
+				unsigned long long targ = dst - jiffies;
+
+#ifdef DEBUG_DECODER_DRIVER
+				dev_dbg(vxd->dev, "%s: pm, reschedule: %llu\n", __func__, targ);
+#endif
+				vxd_sched_worker_locked(vxd, jiffies_to_msecs(targ));
+			}
+		}
+		goto out_unlock;
+	}
+
+	/*
+	 * We are not processing anything, but the pending list is not empty
+	 * (if it was, we would have entered the <if statement> above). This
+	 * can happen under specific conditions, when an input message occupies
+	 * almost the whole host->MTX ring buffer and is followed by a large
+	 * padding message.
+	 */
+	if (list_empty(&vxd->msgs)) {
+		vxd_schedule_locked(vxd);
+		goto out_unlock;
+	}
+
+	/* Skip emergency reset if it's disabled. */
+	if (vxd->hw_dwr_period == 0) {
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: skip watchdog\n", __func__);
+#endif
+		goto out_unlock;
+	} else {
+		/* Recalculate DWR when needed */
+		unsigned long long dst = vxd->dwr_start +
+			msecs_to_jiffies(vxd->hw_dwr_period);
+
+		if (time_is_after_jiffies((unsigned long)dst)) {
+			unsigned long long targ = dst - jiffies;
+
+#ifdef DEBUG_DECODER_DRIVER
+			dev_dbg(vxd->dev, "%s: dwr, reschedule: %llu\n", __func__, targ);
+#endif
+			vxd_sched_worker_locked(vxd, jiffies_to_msecs(targ));
+			goto out_unlock;
+		}
+	}
+
+	/* Get ID of the oldest item being processed by the HW */
+	item_tail = list_entry(vxd->msgs.prev, struct vxd_item, list);
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: tail_item: %p, id: 0x%x\n", __func__, item_tail,
+		item_tail->msg_id);
+#endif
+
+	/* Get HW and firmware state */
+	vxd_pvdec_get_state(vxd->dev, vxd->reg_base, VXD_NUM_PIX_PIPES(vxd->props), &state);
+
+	if (vxd->state.msg_id_tail == item_tail->msg_id &&
+	    !memcmp(&state, &vxd->state.hw_state,
+	    sizeof(struct vxd_hw_state))) {
+		vxd->state.msg_id_tail = 0;
+		memset(&vxd->state.hw_state, 0, sizeof(vxd->state.hw_state));
+		dev_err(vxd->dev, "device DWR(%ums) expired: resetting!!!\n",
+			vxd->hw_dwr_period);
+		vxd_emrg_reset_locked(vxd, VXD_FW_MSG_FLAG_DWR);
+	} else {
+		/* Record current state */
+		vxd->state.msg_id_tail = item_tail->msg_id;
+		vxd->state.hw_state = state;
+
+		/* Submit items to the HW, if space is available.  */
+		vxd_schedule_locked(vxd);
+
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(vxd->dev, "%s: scheduling DWR work (%d ms)!\n",
+			__func__, vxd->hw_dwr_period);
+#endif
+		vxd_sched_worker_locked(vxd, vxd->hw_dwr_period);
+	}
+
+out_unlock:
+	mutex_unlock(vxd->mutex);
+}
+
+/*
+ * Lazy initialization of the main driver context (when the first core is
+ * probed) -- we need the heap configuration from sysdev to allocate
+ * firmware buffers.
+ */
+int vxd_init(void *dev, struct vxd_dev *vxd,
+	     const struct heap_config heap_configs[], int heaps)
+{
+	int ret, i;
+
+	INIT_LIST_HEAD(&vxd_drv.heaps);
+	vxd_drv.internal_heap_id = VXD_INVALID_ID;
+
+	vxd_drv.mem_ctx = NULL;
+
+	INIT_LIST_HEAD(&vxd_drv.devices);
+
+	vxd_drv.virt_space.fw_addr = 0x42000;
+	vxd_drv.virt_space.rendec_addr = 0xe0000000;
+
+	vxd_drv.initialised = 0;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: vxd drv init, params:\n", __func__);
+#endif
+
+	/* Initialise memory management component */
+	for (i = 0; i < heaps; i++) {
+		struct vxd_heap *heap;
+
+#ifdef DEBUG_DECODER_DRIVER
+		dev_dbg(dev, "%s: adding heap of type %d\n",
+			__func__, heap_configs[i].type);
+#endif
+
+		heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+		if (!heap) {
+			ret = -ENOMEM;
+			goto heap_add_failed;
+		}
+
+		ret = img_mem_add_heap(&heap_configs[i], &heap->id);
+		if (ret < 0) {
+			dev_err(dev, "%s: failed to init heap (type %d)!\n",
+				__func__, heap_configs[i].type);
+			kfree(heap);
+			goto heap_add_failed;
+		}
+		list_add(&heap->list, &vxd_drv.heaps);
+
+		/* Implicitly, first heap is used for internal allocations */
+		if (vxd_drv.internal_heap_id < 0) {
+			vxd_drv.internal_heap_id = heap->id;
+#ifdef DEBUG_DECODER_DRIVER
+			dev_dbg(dev, "%s: using heap %d for internal alloc\n",
+				__func__, vxd_drv.internal_heap_id);
+#endif
+		}
+	}
+
+	/* Do not proceed if internal heap not defined */
+	if (vxd_drv.internal_heap_id < 0) {
+		dev_err(dev, "%s: failed to locate heap for internal alloc\n", __func__);
+		ret = -EINVAL;
+		/* The error path below frees any heaps registered so far */
+		goto heap_add_failed;
+	}
+
+	/* Create memory management context for HW buffers */
+	ret = img_mem_create_ctx(&vxd_drv.mem_ctx);
+	if (ret) {
+		dev_err(dev, "%s: failed to create mem context (err:%d)!\n", __func__, ret);
+		goto create_mem_context_failed;
+	}
+
+	vxd->mem_ctx = vxd_drv.mem_ctx;
+
+	/* Allocate rendec buffer */
+	ret = img_mem_alloc(dev, vxd_drv.mem_ctx, vxd_drv.internal_heap_id,
+			    VXD_RENDEC_SIZE * VXD_NUM_PIX_PIPES(vxd->props),
+			    (enum mem_attr)0, &vxd->rendec_buf_id);
+	if (ret) {
+		dev_err(dev, "%s: alloc rendec buffer failed (err:%d)!\n", __func__, ret);
+		goto alloc_rendec_failed;
+	}
+
+	init_delayed_work(&vxd->dwork, vxd_worker, HWA_DECODER);
+	if (!vxd->dwork) {
+		ret = -ENOMEM;
+		goto init_dwork_failed;
+	}
+
+	vxd_drv.initialised = 1;
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: vxd drv init done\n", __func__);
+#endif
+	return 0;
+
+init_dwork_failed:
+	img_mem_free(vxd_drv.mem_ctx, vxd->rendec_buf_id);
+alloc_rendec_failed:
+	img_mem_destroy_ctx(vxd_drv.mem_ctx);
+	vxd_drv.mem_ctx = NULL;
+create_mem_context_failed:
+heap_add_failed:
+	while (!list_empty(&vxd_drv.heaps)) {
+		struct vxd_heap *heap;
+
+		heap = list_first_entry(&vxd_drv.heaps, struct vxd_heap, list);
+		__list_del_entry(&heap->list);
+		img_mem_del_heap(heap->id);
+		kfree(heap);
+	}
+	vxd_drv.internal_heap_id = VXD_INVALID_ID;
+	return ret;
+}
+
+/*
+ * Get internal_heap_id
+ * TODO: The only error check callers perform is "< 0", so if the stored
+ * value is < 0, passing it through still conveys the error.
+ * Callers must error check.
+ */
+int vxd_g_internal_heap_id(void)
+{
+	return vxd_drv.internal_heap_id;
+}
+
+void vxd_deinit(struct vxd_dev *vxd)
+{
+	cancel_delayed_work_sync(vxd->dwork);
+	vxd_make_hw_off_locked(vxd, FALSE);
+
+	/* Destroy memory management context */
+	if (vxd_drv.mem_ctx) {
+		/* Deallocate rendec buffer */
+		img_mem_free(vxd_drv.mem_ctx, vxd->rendec_buf_id);
+
+		img_mem_destroy_ctx(vxd_drv.mem_ctx);
+		vxd_drv.mem_ctx = NULL;
+	}
+
+	/* Deinitialize memory management component */
+	while (!list_empty(&vxd_drv.heaps)) {
+		struct vxd_heap *heap;
+
+		heap = list_first_entry(&vxd_drv.heaps, struct vxd_heap, list);
+		__list_del_entry(&heap->list);
+		img_mem_del_heap(heap->id);
+		kfree(heap);
+	}
+
+	vxd_drv.internal_heap_id = VXD_INVALID_ID;
+	vxd_drv.mem_ctx = NULL;
+	vxd_drv.virt_space.fw_addr = 0x0;
+	vxd_drv.virt_space.rendec_addr = 0x0;
+	vxd_drv.initialised = 0;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+	/* free the kernel object created to debug */
+	kobject_put(vxd_dec_kobject);
+#endif
+}
+
+static void vxd_fw_loaded(const struct firmware *fw, void *context)
+{
+	struct vxd_dev *vxd = context;
+	unsigned long bin_size;
+	int buf_id;
+	struct vxd_fw_hdr *hdr;
+	void *buf_kptr;
+	int ret;
+	unsigned long size = 0;
+	const unsigned char *data = NULL;
+
+	if (!fw) {
+		dev_err(vxd->dev, "Firmware binary is not present\n");
+		vxd->no_fw = 1;
+		/* Unblock anyone waiting for the firmware to load */
+		complete_all(vxd->firmware_loading_complete);
+		return;
+	}
+
+	size = fw->size;
+	data = fw->data;
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_info(vxd->dev, "FW: acquired %s size %zu\n", drv_fw_name, size);
+#endif
+
+	/* Sanity verification of the firmware */
+	if (size < sizeof(struct vxd_fw_hdr)) {
+		dev_err(vxd->dev, "%s: firmware file too small!\n", __func__);
+		goto out;
+	}
+
+	bin_size = size - sizeof(struct vxd_fw_hdr);
+	ret = img_mem_alloc(vxd->dev, vxd_drv.mem_ctx, vxd_drv.internal_heap_id,
+			    bin_size, (enum mem_attr)0, &buf_id);
+	if (ret) {
+		dev_err(vxd->dev, "%s: failed to alloc fw buffer (err:%d)!\n", __func__, ret);
+		goto out;
+	}
+
+	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+	if (!hdr)
+		goto out_release_buf;
+
+	/* Store firmware header in vxd context */
+	memcpy(hdr, data, sizeof(struct vxd_fw_hdr));
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_info(vxd->dev, "FW: info cs: %u, bs: %u, id: 0x%08x, ts: %u\n",
+		 hdr->core_size, hdr->blob_size,
+		 hdr->firmware_id, hdr->timestamp);
+#endif
+
+	/* Check if header is consistent */
+	if (hdr->core_size > bin_size || hdr->blob_size > bin_size) {
+		dev_err(vxd->dev, "%s: got invalid firmware!\n", __func__);
+		goto out_release_hdr;
+	}
+
+	/* Map the firmware buffer to CPU */
+	ret = img_mem_map_km(vxd_drv.mem_ctx, buf_id);
+	if (ret) {
+		dev_err(vxd->dev, "%s: failed to map FW buf to cpu! (%d)\n", __func__, ret);
+		goto out_release_hdr;
+	}
+
+	/* Copy firmware to device buffer */
+	buf_kptr = img_mem_get_kptr(vxd_drv.mem_ctx, buf_id);
+	memcpy(buf_kptr, data + sizeof(struct vxd_fw_hdr), size - sizeof(struct vxd_fw_hdr));
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: FW: copied to buffer %d kptr 0x%p\n", __func__, buf_id, buf_kptr);
+#endif
+
+	img_mem_sync_cpu_to_device(vxd_drv.mem_ctx, buf_id);
+
+	vxd->firmware.fw_size = size;
+	vxd->firmware.buf_id = buf_id;
+	vxd->firmware.hdr = hdr;
+	vxd->firmware.ready = TRUE;
+
+	release_firmware(fw);
+	complete_all(vxd->firmware_loading_complete);
+	pr_debug("Firmware loaded successfully ..!!\n");
+	return;
+
+out_release_hdr:
+	kfree(hdr);
+out_release_buf:
+	img_mem_free(vxd_drv.mem_ctx, buf_id);
+out:
+	release_firmware(fw);
+	complete_all(vxd->firmware_loading_complete);
+	kfree(vxd->firmware_loading_complete);
+	vxd->firmware_loading_complete = NULL;
+}
+
+/*
+ * Takes the firmware from the file system and allocates a buffer
+ */
+int vxd_prepare_fw(struct vxd_dev *vxd)
+{
+	struct completion *fw_load_done;
+	int ret;
+
+	/* Fetch firmware from the file system */
+	fw_load_done = kmalloc(sizeof(*fw_load_done), GFP_KERNEL);
+	if (!fw_load_done) {
+		pr_err("Memory allocation failed for init_completion\n");
+		return -ENOMEM;
+	}
+	init_completion(fw_load_done);
+	vxd->firmware_loading_complete = fw_load_done;
+
+	vxd->firmware.ready = FALSE;
+	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+				      drv_fw_name, vxd->dev, GFP_KERNEL, vxd,
+				      vxd_fw_loaded);
+	if (ret < 0) {
+		dev_err(vxd->dev, "request_firmware_nowait err: %d\n", ret);
+		complete_all(vxd->firmware_loading_complete);
+		kfree(vxd->firmware_loading_complete);
+		vxd->firmware_loading_complete = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * Cleans firmware resources
+ */
+void vxd_clean_fw_resources(struct vxd_dev *vxd)
+{
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s:%d\n", __func__, __LINE__);
+#endif
+
+	if (vxd->firmware_loading_complete) {
+		wait_for_completion(vxd->firmware_loading_complete);
+		kfree(vxd->firmware_loading_complete);
+		vxd->firmware_loading_complete = NULL;
+	}
+
+	if (vxd->firmware.fw_size) {
+		img_mem_free(vxd_drv.mem_ctx, vxd->firmware.buf_id);
+		kfree(vxd->firmware.hdr);
+		vxd->firmware.hdr = NULL;
+#ifdef DEBUG_DECODER_DRIVER
+		dev_info(vxd->dev, "FW: released %s\n", drv_fw_name);
+#endif
+		vxd->firmware.buf_id = VXD_INVALID_ID;
+	}
+}
+
+/*
+ * Submit a message to the VXD.
+ * <ctx> is used to verify that requested stream id (item->stream_id) is valid
+ * for this ctx
+ */
+int vxd_send_msg(struct vxd_dec_ctx *ctx, struct vxd_fw_msg *msg)
+{
+	struct vxd_dev *vxd = ctx->dev;
+	unsigned long msg_size;
+	struct vxd_item *item;
+	struct vxd_stream *stream;
+	int ret;
+
+	if (msg->payload_size < VXD_MIN_INPUT_SIZE)
+		return -EINVAL;
+
+	if (msg->payload_size % sizeof(unsigned int)) {
+		dev_err(vxd->dev, "msg size not aligned! (%u)\n",
+			msg->payload_size);
+		return -EINVAL;
+	}
+
+	msg_size = VXD_MSG_SIZE(*msg);
+
+	if (msg_size > VXD_MAX_INPUT_SIZE)
+		return -EINVAL;
+
+	/* Verify that the gap was left for stream PTD */
+	if (msg->payload[VXD_PTD_MSG_OFFSET] != 0) {
+		dev_err(vxd->dev, "%s: PTD gap missing!\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+	if (ret)
+		return ret;
+
+	stream = idr_find(vxd->streams, ctx->stream.id);
+	if (!stream) {
+		dev_warn(vxd->dev, "%s: invalid stream id requested! (%u)\n",
+			 __func__, ctx->stream.id);
+
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	item = kmalloc(sizeof(*item) + msg->payload_size, GFP_KERNEL);
+	if (!item) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	memcpy(&item->msg, msg, msg_size);
+
+	msg->out_flags &= VXD_FW_MSG_WR_FLAGS_MASK;
+	item->stream_id = ctx->stream.id;
+	item->msg_id = 0;
+	item->msg.out_flags = msg->out_flags;
+	item->destroy = 0;
+
+	/*
+	 * Inject the stream PTD into the message. It was already verified that
+	 * there is enough space.
+	 */
+	item->msg.payload[VXD_PTD_MSG_OFFSET] = stream->ptd;
+
+	list_add_tail(&item->list, &vxd->pend);
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev,
+		"%s: added item %p to pend, ptd: 0x%x, str: %u flags: 0x%x\n",
+		__func__, item, stream->ptd, stream->id, item->msg.out_flags);
+#endif
+
+	vxd_schedule_locked(vxd);
+
+out_unlock:
+	mutex_unlock(ctx->mutex);
+
+	return ret;
+}
+
+int vxd_suspend_dev(void *dev)
+{
+	struct vxd_dev *vxd = platform_get_drvdata(to_platform_device(dev));
+
+	mutex_lock(vxd->mutex);
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: taking a nap!\n", __func__);
+#endif
+
+	/* Cancel the worker first */
+	cancel_delayed_work(vxd->dwork);
+
+	/* Forcing hardware disable */
+	vxd_make_hw_off_locked(vxd, TRUE);
+
+	/* Move all valid items to the pending queue */
+	vxd_rewind_msgs_locked(vxd);
+
+	mutex_unlock(vxd->mutex);
+
+	return 0;
+}
+
+int vxd_resume_dev(void *dev)
+{
+	struct vxd_dev *vxd = platform_get_drvdata(to_platform_device(dev));
+	int ret = 0;
+
+	mutex_lock(vxd->mutex);
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(dev, "%s: waking up!\n", __func__);
+#endif
+
+	mutex_unlock(vxd->mutex);
+
+	return ret;
+}
+
+int vxd_map_buffer_sg(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+		      unsigned int str_id,
+		      unsigned int buff_id,
+		      void *sgt, unsigned int virt_addr,
+		      unsigned int map_flags)
+{
+	struct vxd_stream *stream;
+	unsigned int flags = VXD_MMU_PTD_FLAG_NONE;
+	int ret;
+
+	ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+	if (ret)
+		return ret;
+
+	stream = idr_find(vxd->streams, str_id);
+	if (!stream) {
+		dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if ((map_flags & (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY))
+		== (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY)) {
+		dev_err(vxd->dev, "%s: Bogus mapping flags 0x%x!\n", __func__,
+			map_flags);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Convert permission flags to internal definitions */
+	if (map_flags & VXD_MAP_FLAG_READ_ONLY)
+		flags |= VXD_MMU_PTD_FLAG_READ_ONLY;
+
+	if (map_flags & VXD_MAP_FLAG_WRITE_ONLY)
+		flags |= VXD_MMU_PTD_FLAG_WRITE_ONLY;
+
+	ret = img_mmu_map_sg(stream->mmu_ctx, ctx->mem_ctx, buff_id, sgt, virt_addr, flags);
+	if (ret) {
+		dev_err(vxd->dev, "%s: map failed!\n", __func__);
+		goto out_unlock;
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev,
+		"%s: mapped buf %u to 0x%08x, str_id: %u flags: 0x%x\n",
+		__func__, buff_id, virt_addr, str_id, flags);
+#endif
+
+out_unlock:
+	mutex_unlock(ctx->mutex);
+	return ret;
+}
+
+int vxd_map_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx, unsigned int str_id,
+		   unsigned int buff_id,
+		   unsigned int virt_addr,
+		   unsigned int map_flags)
+{
+	struct vxd_stream *stream;
+	unsigned int flags = VXD_MMU_PTD_FLAG_NONE;
+	int ret;
+
+	ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+	if (ret)
+		return ret;
+
+	stream = idr_find(vxd->streams, str_id);
+	if (!stream) {
+		dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	if ((map_flags & (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY))
+		== (VXD_MAP_FLAG_READ_ONLY | VXD_MAP_FLAG_WRITE_ONLY)) {
+		dev_err(vxd->dev, "%s: Bogus mapping flags 0x%x!\n", __func__, map_flags);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Convert permission flags to internal definitions */
+	if (map_flags & VXD_MAP_FLAG_READ_ONLY)
+		flags |= VXD_MMU_PTD_FLAG_READ_ONLY;
+
+	if (map_flags & VXD_MAP_FLAG_WRITE_ONLY)
+		flags |= VXD_MMU_PTD_FLAG_WRITE_ONLY;
+
+	ret = img_mmu_map(stream->mmu_ctx, ctx->mem_ctx, buff_id, virt_addr, flags);
+	if (ret) {
+		dev_err(vxd->dev, "%s: map failed!\n", __func__);
+		goto out_unlock;
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev,
+		"%s: mapped buf %u to 0x%08x, str_id: %u flags: 0x%x\n",
+		__func__, buff_id, virt_addr, str_id, flags);
+#endif
+
+out_unlock:
+	mutex_unlock(ctx->mutex);
+	return ret;
+}
+
+int vxd_unmap_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+		     unsigned int str_id, unsigned int buff_id)
+{
+	struct vxd_stream *stream;
+	int ret;
+
+	ret = mutex_lock_interruptible_nested(ctx->mutex, SUBCLASS_VXD_CORE);
+	if (ret)
+		return ret;
+
+	stream = idr_find(vxd->streams, str_id);
+	if (!stream) {
+		dev_err(vxd->dev, "%s: stream %d not found!\n", __func__, str_id);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = img_mmu_unmap(stream->mmu_ctx, ctx->mem_ctx, buff_id);
+	if (ret) {
+		dev_err(vxd->dev, "%s: map failed!\n", __func__);
+		goto out_unlock;
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	dev_dbg(vxd->dev, "%s: unmapped buf %u str_id: %u\n", __func__, buff_id, str_id);
+#endif
+
+out_unlock:
+	mutex_unlock(ctx->mutex);
+	return ret;
+}
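
For reference, a minimal caller sketch (illustrative only, not part of this
patch; the helper name and the 8-dword payload are hypothetical) of the
contract vxd_send_msg() enforces above: a dword-aligned payload no smaller
than VXD_MIN_INPUT_SIZE and a zeroed slot at VXD_PTD_MSG_OFFSET, into which
the core injects the stream PTD:

	static int example_submit(struct vxd_dec_ctx *ctx)
	{
		unsigned int payload_bytes = 8 * sizeof(unsigned int);
		struct vxd_fw_msg *msg;
		int ret;

		msg = kzalloc(sizeof(*msg) + payload_bytes, GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->payload_size = payload_bytes;
		/* kzalloc leaves payload[VXD_PTD_MSG_OFFSET] zeroed, as required */
		ret = vxd_send_msg(ctx, msg); /* the message is copied internally */
		kfree(msg);

		return ret;
	}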
diff --git a/drivers/staging/media/vxd/decoder/vxd_dec.c b/drivers/staging/media/vxd/decoder/vxd_dec.c
new file mode 100644
index 000000000000..cf3cf9b7b6f0
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_dec.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IMG DEC SYSDEV and UI Interface function implementations
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "core.h"
+#include "h264fw_data.h"
+#include "hevcfw_data.h"
+#include "img_dec_common.h"
+#include "vxd_pvdec_priv.h"
+
+unsigned int get_nbuffers(enum vdec_vid_std std, int w, int h,
+			  unsigned int max_num_ref_frames)
+{
+	unsigned int nbuffers;
+
+	switch (std) {
+	case VDEC_STD_H264:
+		/*
+		 * Request number of buffers from header bspp information
+		 * using formula N + Display Lag
+		 * Parser is passing (2*N)
+		 */
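+		/* 184320 is the H.264 level 5.1 MaxDpbMbs limit */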
+		if (max_num_ref_frames == 0) {
+			nbuffers = DISPLAY_LAG + min(MAX_CAPBUFS_H264,
+					(184320 / ((w / 16) * (h / 16))));
+		} else {
+			nbuffers = max_num_ref_frames + DISPLAY_LAG;
+		}
+		break;
+	case VDEC_STD_HEVC:
+		if (max_num_ref_frames == 0) {
+			if ((w * h) <= (HEVC_MAX_LUMA_PS >> 2))
+				nbuffers = 16;
+			else if ((w * h) <= (HEVC_MAX_LUMA_PS >> 1))
+				nbuffers = 12;
+			else if ((w * h) <= ((3 * HEVC_MAX_LUMA_PS) >> 2))
+				nbuffers = 8;
+			else
+				nbuffers = 6;
+			nbuffers += DISPLAY_LAG;
+		} else {
+			nbuffers = max_num_ref_frames + DISPLAY_LAG;
+		}
+		break;
+#ifdef HAS_JPEG
+	case VDEC_STD_JPEG:
+		/*
+		 * Request number of output buffers based on h264 spec
+		 * + display delay
+		 */
+		nbuffers = DISPLAY_LAG + min(MAX_CAPBUFS_H264,
+				(184320 / ((w / 16) * (h / 16))));
+		break;
+#endif
+	default:
+		nbuffers = 0;
+	}
+
+	return nbuffers;
+}
+
+int vxd_dec_alloc_bspp_resource(struct vxd_dec_ctx *ctx, enum vdec_vid_std vid_std)
+{
+	struct vxd_dev *vxd_dev = ctx->dev;
+	struct device *dev = vxd_dev->v4l2_dev.dev;
+	struct vdec_buf_info buf_info;
+	struct bspp_ddbuf_array_info *fw_sequ = ctx->fw_sequ;
+	struct bspp_ddbuf_array_info *fw_pps = ctx->fw_pps;
+	int attributes = 0, heap_id = 0, size = 0;
+	int i, ret = 0;
+
+	attributes = SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+		SYS_MEMATTRIB_INTERNAL | SYS_MEMATTRIB_CPU_WRITE;
+	heap_id = vxd_g_internal_heap_id();
+
+	size = vid_std == VDEC_STD_HEVC ?
+		sizeof(struct hevcfw_sequence_ps) : sizeof(struct h264fw_sequence_ps);
+
+#ifdef HAS_JPEG
+	if (vid_std == VDEC_STD_JPEG)
+		size = sizeof(struct vdec_jpeg_sequ_hdr_info);
+#endif
+
+	for (i = 0; i < MAX_SEQUENCES; i++) {
+		ret = img_mem_alloc(vxd_dev->dev, ctx->mem_ctx, heap_id,
+				    size, (enum mem_attr)attributes,
+				    (int *)&fw_sequ[i].ddbuf_info.buf_id);
+		if (ret) {
+			dev_err(dev, "Couldn't allocate sequ buffer %d\n", i);
+			return -ENOMEM;
+		}
+		ret = img_mem_map_km(ctx->mem_ctx, fw_sequ[i].ddbuf_info.buf_id);
+		if (ret) {
+			dev_err(dev, "Couldn't map sequ buffer %d\n", i);
+			return -ENOMEM;
+		}
+		fw_sequ[i].ddbuf_info.cpu_virt_addr =
+			img_mem_get_kptr(ctx->mem_ctx,
+					 fw_sequ[i].ddbuf_info.buf_id);
+		fw_sequ[i].buf_offset = 0;
+		fw_sequ[i].buf_element_size = size;
+		fw_sequ[i].ddbuf_info.buf_size = size;
+		fw_sequ[i].ddbuf_info.mem_attrib = (enum sys_emem_attrib)attributes;
+		memset(fw_sequ[i].ddbuf_info.cpu_virt_addr, 0, size);
+
+		buf_info.cpu_linear_addr =
+			fw_sequ[i].ddbuf_info.cpu_virt_addr;
+		buf_info.buf_size = size;
+		buf_info.fd = -1;
+		buf_info.buf_id = fw_sequ[i].ddbuf_info.buf_id;
+		buf_info.mem_attrib =
+			(enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+			SYS_MEMATTRIB_INPUT | SYS_MEMATTRIB_CPU_WRITE);
+
+		ret = core_stream_map_buf(ctx->res_str_id, VDEC_BUFTYPE_BITSTREAM, &buf_info,
+					  &fw_sequ[i].ddbuf_info.bufmap_id);
+		if (ret) {
+			dev_err(dev, "sps core_stream_map_buf failed\n");
+			return ret;
+		}
+	}
+
+#ifdef HAS_JPEG
+	if (vid_std == VDEC_STD_JPEG)
+		return 0;
+#endif
+
+	size = vid_std == VDEC_STD_HEVC ?
+		sizeof(struct hevcfw_picture_ps) : sizeof(struct h264fw_picture_ps);
+
+	for (i = 0; i < MAX_PPSS; i++) {
+		ret = img_mem_alloc(vxd_dev->dev, ctx->mem_ctx, heap_id, size,
+				    (enum mem_attr)attributes,
+				    (int *)&fw_pps[i].ddbuf_info.buf_id);
+		if (ret) {
+			dev_err(dev, "Couldn't allocate sequ buffer %d\n", i);
+			return -ENOMEM;
+		}
+		ret = img_mem_map_km(ctx->mem_ctx, fw_pps[i].ddbuf_info.buf_id);
+		if (ret) {
+			dev_err(dev, "Couldn't map sequ buffer %d\n", i);
+			return -ENOMEM;
+		}
+		fw_pps[i].ddbuf_info.cpu_virt_addr = img_mem_get_kptr(ctx->mem_ctx,
+								      fw_pps[i].ddbuf_info.buf_id);
+		fw_pps[i].buf_offset = 0;
+		fw_pps[i].buf_element_size = size;
+		fw_pps[i].ddbuf_info.buf_size = size;
+		fw_pps[i].ddbuf_info.mem_attrib = (enum sys_emem_attrib)attributes;
+		memset(fw_pps[i].ddbuf_info.cpu_virt_addr, 0, size);
+
+		buf_info.cpu_linear_addr =
+			fw_pps[i].ddbuf_info.cpu_virt_addr;
+		buf_info.buf_size = size;
+		buf_info.fd = -1;
+		buf_info.buf_id = fw_pps[i].ddbuf_info.buf_id;
+		buf_info.mem_attrib =
+			(enum sys_emem_attrib)(SYS_MEMATTRIB_UNCACHED | SYS_MEMATTRIB_WRITECOMBINE |
+			SYS_MEMATTRIB_INPUT | SYS_MEMATTRIB_CPU_WRITE);
+
+		ret = core_stream_map_buf(ctx->res_str_id, VDEC_BUFTYPE_BITSTREAM, &buf_info,
+					  &fw_pps[i].ddbuf_info.bufmap_id);
+		if (ret) {
+			dev_err(dev, "pps core_stream_map_buf failed\n");
+			return ret;
+		}
+	}
+	return 0;
+}
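
As a usage illustration (a sketch, not code from this series; the wrapper
and the 1080p geometry are assumptions), get_nbuffers() is the kind of
helper a V4L2 queue-setup path would call to derive the minimum number of
capture buffers:

	static unsigned int example_min_capture_bufs(struct vxd_dec_ctx *ctx)
	{
		/* 1080p H.264, ref-frame count as reported by the parser */
		return get_nbuffers(VDEC_STD_H264, 1920, 1080,
				    ctx->max_num_ref_frames);
	}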
diff --git a/drivers/staging/media/vxd/decoder/vxd_dec.h b/drivers/staging/media/vxd/decoder/vxd_dec.h
new file mode 100644
index 000000000000..a8d409bc4212
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_dec.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMG DEC SYSDEV and UI Interface header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _VXD_DEC_H
+#define _VXD_DEC_H
+
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/types.h>
+
+#include "bspp.h"
+#include "img_dec_common.h"
+#include "img_mem_man.h"
+#include "img_pixfmts.h"
+#include "pixel_api.h"
+#include "vdecdd_defs.h"
+#include "vdec_defs.h"
+#include "work_queue.h"
+
+#define VXD_MIN_STREAM_ID 1
+#define VXD_MAX_STREAMS_PER_DEV 254
+#define VXD_MAX_STREAM_ID (VXD_MIN_STREAM_ID + VXD_MAX_STREAMS_PER_DEV)
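+
+/*
+ * Illustrative use (a sketch; the actual allocation site is elsewhere in
+ * the series): stream ids are expected to come from this window, e.g.
+ * idr_alloc_cyclic(vxd->streams, stream, VXD_MIN_STREAM_ID,
+ * VXD_MAX_STREAM_ID, GFP_KERNEL).
+ */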
+
+#define CODEC_NONE -1
+#define CODEC_H264_DEC 0
+#define CODEC_MPEG4_DEC 1
+#define CODEC_VP8_DEC 2
+#define CODEC_VC1_DEC 3
+#define CODEC_MPEG2_DEC 4
+#define CODEC_JPEG_DEC 5
+#define CODEC_VP9_DEC 6
+#define CODEC_HEVC_DEC 7
+
+#define MAX_SEGMENTS 6
+#define HW_ALIGN 64
+
+#define MAX_BUF_TRACE 30
+
+#define MAX_CAPBUFS_H264 16
+#define DISPLAY_LAG 3
+#define HEVC_MAX_LUMA_PS 35651584
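+/* MaxLumaPs at HEVC level 6; used by the DPB sizing rule in get_nbuffers() */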
+
+#define MAX_PLANES 3
+
+enum {
+	Q_DATA_SRC         = 0,
+	Q_DATA_DST         = 1,
+	Q_DATA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum {
+	IMG_DEC_FMT_TYPE_CAPTURE     = 0x01,
+	IMG_DEC_FMT_TYPE_OUTPUT      = 0x10,
+	IMG_DEC_FMT_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+enum vxd_map_flags {
+	VXD_MAP_FLAG_NONE        = 0x0,
+	VXD_MAP_FLAG_READ_ONLY   = 0x1,
+	VXD_MAP_FLAG_WRITE_ONLY  = 0x2,
+	VXD_MAP_FLAG_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct vxd_fw_msg - This structure holds the information about the message
+ *                     exchanged in read/write between Kernel and firmware.
+ *
+ * @out_flags: indicating the type of message
+ * @payload_size: size of payload in bytes
+ * @payload: data which is sent to the firmware
+ */
+struct vxd_fw_msg {
+	unsigned int out_flags;
+	unsigned int payload_size;
+	unsigned int payload[];
+};
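+
+/*
+ * Illustrative sizing (a sketch, not part of the driver; the 16-dword
+ * payload is hypothetical): callers allocate the header and the flexible
+ * payload as one block, e.g.
+ *
+ *	msg = kzalloc(sizeof(*msg) + 16 * sizeof(unsigned int), GFP_KERNEL);
+ *	msg->payload_size = 16 * sizeof(unsigned int);
+ *
+ * VXD_MSG_SIZE() (see vxd_send_msg()) then derives the total message size
+ * from payload_size.
+ */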
+
+/* HW state */
+struct vxd_hw_state {
+	unsigned int fw_counter;
+	unsigned int fe_status[VXD_MAX_PIPES];
+	unsigned int be_status[VXD_MAX_PIPES];
+	unsigned int dmac_status[VXD_MAX_PIPES][2]; /* Cover DMA chan 2/3*/
+	unsigned int irq_status;
+};
+
+/*
+ * struct vxd_state - contains VXD HW state
+ *
+ * @hw_state: HW state
+ * @msg_id_tail: msg id of the oldest item being processed
+ */
+struct vxd_state {
+	struct vxd_hw_state hw_state;
+	unsigned short msg_id_tail;
+};
+
+/*
+ * struct vxd_dec_fmt - contains info for each of the supported video format
+ *
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @num_planes: number of planes required for luma and chroma
+ * @type: CAPTURE or OUTPUT
+ * @std: VDEC video standard
+ * @pixfmt: IMG pixel format
+ * @interleave: Chroma interleave order
+ * @idc: Chroma format
+ * @size_num: Numerator used to calculate image size
+ * @size_den: Denominator used to calculate image size
+ * @bytes_pp: Bytes per pixel for this format
+ */
+struct vxd_dec_fmt {
+	unsigned int fourcc;
+	unsigned int num_planes;
+	unsigned char type;
+	enum vdec_vid_std std;
+	enum img_pixfmt pixfmt;
+	enum pixel_chroma_interleaved interleave;
+	enum pixel_fmt_idc idc;
+	int size_num;
+	int size_den;
+	int bytes_pp;
+};
+
+/*
+ * struct vxd_item - contains information about the item sent to fw
+ *
+ * @list: item to be linked list to items_done, msgs, or pend.
+ * @stream_id: stream id
+ * @msg_id: message id
+ * @destroy: item belongs to the stream which is destroyed
+ * @msg: contains msg between kernel and fw
+ */
+struct vxd_item {
+	struct list_head list;
+	unsigned int stream_id;
+	unsigned int msg_id;
+	unsigned int destroy : 1;
+	struct vxd_fw_msg msg;
+};
+
+enum vxd_cb_type {
+	VXD_CB_STRUNIT_PROCESSED,
+	VXD_CB_SPS_RELEASE,
+	VXD_CB_PPS_RELEASE,
+	VXD_CB_PICT_DECODED,
+	VXD_CB_PICT_DISPLAY,
+	VXD_CB_PICT_RELEASE,
+	VXD_CB_PICT_END,
+	VXD_CB_STR_END,
+	VXD_CB_ERROR_FATAL,
+	VXD_CB_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * vxd_cb - Return a resource to vxd
+ *
+ * @ctx: the vxd stream context
+ * @type: the type of message
+ * @buf_map_id: the buf_map_id of the resource being returned
+ */
+typedef void (*vxd_cb)(void *ctx, enum vxd_cb_type type, unsigned int buf_map_id);
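+
+/*
+ * Example callback (an illustrative sketch; mark_buffer_reusable() is a
+ * hypothetical helper, not part of this driver):
+ *
+ *	static void example_cb(void *ctx, enum vxd_cb_type type,
+ *			       unsigned int buf_map_id)
+ *	{
+ *		if (type == VXD_CB_PICT_RELEASE)
+ *			mark_buffer_reusable(ctx, buf_map_id);
+ *	}
+ */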
+
+/*
+ * struct vxd_return - contains information about items returning from core
+ *
+ * @work: work item used to queue the return to the worker
+ * @ctx: the stream context that owns the returned resource
+ * @type: Type of item being returned
+ * @buf_map_id: mmu mapped id of buffer being returned
+ */
+struct vxd_return {
+	void *work;
+	struct vxd_dec_ctx *ctx;
+	enum vxd_cb_type type;
+	unsigned int buf_map_id;
+};
+
+/*
+ * struct vxd_dec_q_data - contains queue data information
+ *
+ * @fmt: format info
+ * @width: frame width
+ * @height: frame height
+ * @bytesperline: bytes per line in memory
+ * @size_image: image size in memory
+ */
+struct vxd_dec_q_data {
+	struct vxd_dec_fmt *fmt;
+	unsigned int width;
+	unsigned int height;
+	unsigned int bytesperline[MAX_PLANES];
+	unsigned int size_image[MAX_PLANES];
+};
+
+/*
+ * struct time_prof - contains time taken by decoding information
+ *
+ * @id: id info
+ * @start_time: start time
+ * @end_time: end time
+ */
+struct time_prof {
+	unsigned int id;
+	long long start_time;
+	long long end_time;
+};
+
+/*
+ * struct vxd_dev - The struct containing decoder driver internal parameters.
+ *
+ * @v4l2_dev: main struct of V4L2 device drivers
+ * @dev: platform device driver
+ * @vfd_dec: video device structure to create and manage the V4L2 device node.
+ * @plat_dev: linux platform device
+ * @m2m_dev: mem2mem device
+ * @mutex: mutex to protect device-level state and ongoing operations
+ * @module_irq: a threaded request IRQ for the device
+ * @reg_base: base address of the IMG VXD hw registers
+ * @props: contains HW properties
+ * @mmu_config_addr_width: indicates the number of extended address bits
+ *                         (above 32) that the external memory interface
+ *                         uses, based on EXTENDED_ADDR_RANGE field of
+ *                         MMU_CONFIG0
+ * @rendec_buf_id: buffer id for rendec buffer allocation
+ * @firmware: firmware information based on vxd_dev_fw structure
+ * @firmware_loading_complete: loading completion
+ * @no_fw: set when the firmware binary could not be loaded from the filesystem
+ * @fw_refcnt: firmware reference counter
+ * @hw_on: indication if hw is on or off
+ * @hw_dead: indication if hw is dead
+ * @lock: basic primitive for locking through spinlock
+ * @state: internal state handling of vxd state
+ * @msgs: linked list of msgs with vxd_item
+ * @pend: linked list of pending msgs to be sent to fw
+ * @msg_cnt: counter of messages submitted to VXD. Wraps every VXD_MSG_ID_MASK
+ * @freq_khz: Core clock frequency measured during boot of firmware
+ * @streams: IDR of active streams, indexed by stream id
+ * @mem_ctx: memory management context for HW buffers
+ * @dwork: delayed work used for power management and the watchdog
+ * @work_sched_at: time at which the last work was scheduled
+ * @emergency: indicates if emergency condition occurred
+ * @dbgfs_ctx: pointer to debug FS context.
+ * @hw_pm_delay: delay before performing PM
+ * @hw_dwr_period: period of the device watchdog reset (DWR) check
+ * @pm_start: time, in jiffies, when the core became idle
+ * @dwr_start: time, in jiffies, when dwr has been started
+ */
+struct vxd_dev {
+	struct v4l2_device v4l2_dev;
+	void *dev;
+	struct video_device *vfd_dec;
+	struct platform_device *plat_dev;
+	struct v4l2_m2m_dev *m2m_dev;
+	struct mutex  *mutex; /* Per device mutex */
+	int module_irq;
+	void __iomem *reg_base;
+	struct vxd_core_props props;
+	unsigned int mmu_config_addr_width;
+	int rendec_buf_id;
+	struct vxd_dev_fw firmware;
+	void *firmware_loading_complete;
+	unsigned char no_fw;
+	unsigned char fw_refcnt;
+	unsigned int hw_on;
+	unsigned int hw_dead;
+	void *lock; /* basic device level spinlock */
+	struct vxd_state state;
+	struct list_head msgs;
+	struct list_head pend;
+	int msg_cnt;
+	unsigned int freq_khz;
+	struct idr *streams;
+	struct mem_ctx *mem_ctx;
+	void *dwork;
+	unsigned long long work_sched_at;
+	unsigned int emergency;
+	void *dbgfs_ctx;
+	unsigned int hw_pm_delay;
+	unsigned int hw_dwr_period;
+	unsigned long long pm_start;
+	unsigned long long dwr_start;
+	struct time_prof time_fw[MAX_BUF_TRACE];
+	struct time_prof time_drv[MAX_BUF_TRACE];
+
+	/* The variables defined below are used in RTOS only. */
+	/* This variable holds queue handler */
+	void *vxd_worker_queue_handle;
+	void *vxd_worker_queue_sem_handle;
+};
+
+/*
+ * struct vxd_stream - holds stream-related info
+ *
+ * @ctx: associated vxd_dec_ctx
+ * @mmu_ctx: MMU context for this stream
+ * @ptd: ptd for the stream
+ * @id: unique stream id
+ */
+struct vxd_stream {
+	struct vxd_dec_ctx *ctx;
+	struct mmu_ctx *mmu_ctx;
+	unsigned int ptd;
+	unsigned int id;
+};
+
+/*
+ * struct vxd_buffer - holds per buffer info.
+ * @buffer: the vb2_v4l2_buffer
+ * @list: list head for gathering in linked list
+ * @mapped: is this buffer mapped yet
+ * @reuse: is the buffer ready for reuse
+ * @buf_map_id: the mapped buffer id
+ * @buf_info: the buffer info for submitting to map
+ * @bstr_info: the buffer info for submitting to bspp
+ * @seq_unit: the str_unit for submitting sps
+ * @pic_unit: the str_unit for submitting pps and segments
+ * @end_unit: the str_unit for submitting picture_end
+ * @preparsed_data: BSPP preparsed data for this buffer
+ */
+struct vxd_buffer {
+	struct v4l2_m2m_buffer buffer;
+	struct list_head list;
+	unsigned char mapped;
+	unsigned char reuse;
+	unsigned int buf_map_id;
+	struct vdec_buf_info buf_info;
+	struct bspp_ddbuf_info bstr_info;
+	struct vdecdd_str_unit seq_unit;
+	struct vdecdd_str_unit pic_unit;
+	struct vdecdd_str_unit end_unit;
+	struct bspp_preparsed_data preparsed_data;
+};
+
+typedef void (*decode_cb)(int res_str_id, unsigned int *msg, unsigned int msg_size,
+			  unsigned int msg_flags);
+
+/*
+ * struct vxd_dec_ctx - holds per stream data. Each playback has its own
+ *                      vxd_dec_ctx
+ *
+ * @fh: V4L2 file handler
+ * @dev: pointer to the device main information.
+ * @ctrl_hdl_dec: v4l2 custom control command for video decoder
+ * @mem_ctx: mem context for this stream
+ * @mmu_ctx: MMU context for this stream
+ * @ptd: page table information
+ * @items_done: linked list of completed items
+ * @width: frame width
+ * @height: frame height
+ * @width_orig: original frame width (before padding)
+ * @height_orig: original frame height (before padding)
+ * @q_data: Queue data information of src[0] and dst[1]
+ * @stream: stream-related info
+ * @work: work queue for message handling
+ * @return_queue: list of resources returned from core
+ * @out_buffers: list of all output buffers
+ * @cap_buffers: list of all capture buffers except those in reuse_queue
+ * @reuse_queue: list of capture buffers waiting for core to signal reuse
+ * @res_str_id: Core stream id
+ * @stream_created: Core stream is created
+ * @stream_configured: Core stream is configured
+ * @opconfig_pending: Core opconfig is pending stream_create
+ * @src_streaming: V4L2 src stream is streaming
+ * @dst_streaming: V4L2 dst stream is streaming
+ * @core_streaming: core is streaming
+ * @aborting: signal job abort on next irq
+ * @str_opcfg: core output config
+ * @pict_bufcfg: core picture buffer config
+ * @bspp_context: BSPP Stream context handle
+ * @seg_list: list of bspp_bitstr_seg for submitting to BSPP
+ * @fw_sequ: BSPP sps resource
+ * @fw_pps: BSPP pps resource
+ * @cb: registered callback for incoming messages
+ * @mutex: mutex to protect context specific state machine
+ */
+struct vxd_dec_ctx {
+	struct v4l2_fh fh;
+	struct vxd_dev *dev;
+	struct mem_ctx *mem_ctx;
+	struct mmu_ctx *mmu_ctx;
+	unsigned int ptd;
+	struct list_head items_done;
+	unsigned int width;
+	unsigned int height;
+	unsigned int width_orig;
+	unsigned int height_orig;
+	struct vxd_dec_q_data q_data[2];
+	struct vxd_stream stream;
+	void *work;
+	struct list_head return_queue;
+	struct list_head out_buffers;
+	struct list_head cap_buffers;
+	struct list_head reuse_queue;
+	unsigned int res_str_id;
+	unsigned char stream_created;
+	unsigned char stream_configured;
+	unsigned char opconfig_pending;
+	unsigned char src_streaming;
+	unsigned char dst_streaming;
+	unsigned char core_streaming;
+	unsigned char aborting;
+	unsigned char eos;
+	unsigned char stop_initiated;
+	unsigned char flag_last;
+	unsigned char num_decoding;
+	unsigned int max_num_ref_frames;
+	struct vdec_str_opconfig str_opcfg;
+	struct vdec_pict_bufconfig pict_bufcfg;
+	void *bspp_context;
+	struct bspp_bitstr_seg bstr_segments[MAX_SEGMENTS];
+	struct lst_t seg_list;
+	struct bspp_ddbuf_array_info fw_sequ[MAX_SEQUENCES];
+	struct bspp_ddbuf_array_info fw_pps[MAX_PPSS];
+	decode_cb cb;
+	struct mutex *mutex; /* Per stream mutex */
+
+	/* The variables below are used only in the RTOS build. */
+	void *mm_return_resource; /* Place holder for CB to application */
+	void *stream_worker_queue_handle;
+	void *stream_worker_queue_sem_handle;
+	/* Synchronizes the stream worker and the process function */
+	void *lock;
+	/* Semaphore used to wait until all frames are decoded at EOS */
+	void *sem_eos;
+};
+
+irqreturn_t vxd_handle_irq(void *dev);
+irqreturn_t vxd_handle_thread_irq(void *dev);
+int vxd_init(void *dev, struct vxd_dev *vxd, const struct heap_config heap_configs[], int heaps);
+int vxd_g_internal_heap_id(void);
+void vxd_deinit(struct vxd_dev *vxd);
+int vxd_prepare_fw(struct vxd_dev *vxd);
+void vxd_clean_fw_resources(struct vxd_dev *vxd);
+int vxd_send_msg(struct vxd_dec_ctx *ctx, struct vxd_fw_msg *msg);
+int vxd_suspend_dev(void *dev);
+int vxd_resume_dev(void *dev);
+
+int vxd_create_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx);
+void vxd_destroy_ctx(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx);
+
+int vxd_map_buffer_sg(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+		      unsigned int str_id, unsigned int buff_id,
+		      void *sgt, unsigned int virt_addr,
+		      unsigned int map_flags);
+int vxd_map_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx, unsigned int str_id,
+		   unsigned int buff_id, unsigned int virt_addr, unsigned int map_flags);
+int vxd_unmap_buffer(struct vxd_dev *vxd, struct vxd_dec_ctx *ctx,
+		     unsigned int str_id, unsigned int buff_id);
+
+unsigned int get_nbuffers(enum vdec_vid_std std, int w, int h, unsigned int max_num_ref_frames);
+
+int vxd_dec_alloc_bspp_resource(struct vxd_dec_ctx *ctx, enum vdec_vid_std vid_std);
+
+#ifdef ERROR_RECOVERY_SIMULATION
+/* sysfs read write functions */
+ssize_t vxd_sysfs_show(struct kobject *vxd_dec_kobject,
+		       struct kobj_attribute *attr, char *buf);
+
+ssize_t vxd_sysfs_store(struct kobject *vxd_dec_kobject,
+			struct kobj_attribute *attr, const char *buf, unsigned long count);
+#endif
+#endif /* _VXD_DEC_H */
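
For context, vxd_handle_irq() and vxd_handle_thread_irq() declared above
form a top/bottom-half pair intended for a threaded IRQ registration. A
minimal probe-time sketch (the wrapper names, flags and name string are
assumptions, not taken from this series):

	static irqreturn_t example_hard_irq(int irq, void *dev)
	{
		return vxd_handle_irq(dev);
	}

	static irqreturn_t example_thread_irq(int irq, void *dev)
	{
		return vxd_handle_thread_irq(dev);
	}

	/* in probe(), after dev_set_drvdata(dev, vxd) */
	ret = devm_request_threaded_irq(dev, vxd->module_irq,
					example_hard_irq, example_thread_irq,
					IRQF_ONESHOT, "img-vxd-dec", dev);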
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 06/30] v4l: vxd-dec: Add hardware control modules
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (4 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 05/30] v4l: vxd-dec: Add IMG VXD Video Decoder mem to mem drive sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 07/30] v4l: vxd-dec: Add vxd core module sidraya.bj
                   ` (25 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

The TI video decoder uses the IMG D5520 core to provide H.264
decoding. This patch adds the hardware control layer, which handles
the message transactions with the firmware: it prepares the batch and
fragment messages sent to the firmware and processes the responses.
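
In outline, the receive path decodes the generic message header and
dispatches on the message group (a condensed sketch of
vdeckm_process_msg() below):

	trans_id = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_TRANS_ID);
	msg_type = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_MSG_TYPE);

	switch (msg_type & MSG_GROUP_MASK) {
	case MSG_TYPE_START_PSR_MTXHOST_MSG:	/* firmware -> host */
	case MSG_TYPE_START_PSR_HOSTMTX_MSG:	/* host -> firmware, replayed */
		...
	}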

Signed-off-by: Amit Makani <amit.makani@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |    2 +
 .../staging/media/vxd/decoder/hw_control.c    | 1211 +++++++++++++++++
 .../staging/media/vxd/decoder/hw_control.h    |  144 ++
 3 files changed, 1357 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/hw_control.c
 create mode 100644 drivers/staging/media/vxd/decoder/hw_control.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 47067f907539..2327ea12caa6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19542,6 +19542,8 @@ F:	drivers/staging/media/vxd/common/img_mem_man.h
 F:	drivers/staging/media/vxd/common/img_mem_unified.c
 F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
+F:	drivers/staging/media/vxd/decoder/hw_control.c
+F:	drivers/staging/media/vxd/decoder/hw_control.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
 F:	drivers/staging/media/vxd/decoder/vxd_core.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.c
diff --git a/drivers/staging/media/vxd/decoder/hw_control.c b/drivers/staging/media/vxd/decoder/hw_control.c
new file mode 100644
index 000000000000..049d9bbcd52c
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/hw_control.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD DEC Hardware control implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "decoder.h"
+#include "hw_control.h"
+#include "img_msvdx_vdmc_regs.h"
+#include "img_pvdec_core_regs.h"
+#include "img_pvdec_pixel_regs.h"
+#include "img_pvdec_test_regs.h"
+#include "img_vdec_fw_msg.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "img_msvdx_core_regs.h"
+#include "reg_io2.h"
+#include "vdecdd_defs.h"
+#include "vxd_dec.h"
+#include "vxd_ext.h"
+#include "vxd_int.h"
+#include "vxd_pvdec_priv.h"
+
+#define MSG_GROUP_MASK  0xf0
+
+struct hwctrl_ctx {
+	unsigned int is_initialised;
+	unsigned int is_on_seq_replay;
+	unsigned int replay_tid;
+	unsigned int num_pipes;
+	struct vdecdd_dd_devconfig devconfig;
+	void *hndl_vxd;
+	void *dec_core;
+	void *comp_init_userdata;
+	struct vidio_ddbufinfo dev_ptd_bufinfo;
+	struct lst_t pend_pict_list;
+	struct hwctrl_msgstatus host_msg_status;
+	void *hmsg_task_event;
+	void *hmsg_task_kick;
+	void *hmsg_task;
+	unsigned int is_msg_task_active;
+	struct hwctrl_state state;
+	struct hwctrl_state prev_state;
+	unsigned int is_prev_hw_state_set;
+	unsigned int is_fatal_state;
+};
+
+struct vdeckm_context {
+	unsigned int core_num;
+	struct vxd_coreprops props;
+	unsigned short current_msgid;
+	unsigned char reader_active;
+	void *comms_ram_addr;
+	unsigned int state_offset;
+	unsigned int state_size;
+};
+
+/*
+ * Panic reason identifier.
+ */
+enum pvdec_panic_reason {
+	PANIC_REASON_OTHER = 0,
+	PANIC_REASON_WDT,
+	PANIC_REASON_READ_TIMEOUT,
+	PANIC_REASON_CMD_TIMEOUT,
+	PANIC_REASON_MMU_FAULT,
+	PANIC_REASON_MAX,
+	PANIC_REASON_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Panic reason strings.
+ * NOTE: Should match the pvdec_panic_reason ids.
+ */
+static unsigned char *apanic_reason[PANIC_REASON_MAX] = {
+	[PANIC_REASON_OTHER] = "Other",
+	[PANIC_REASON_WDT] = "Watch Dog Timeout",
+	[PANIC_REASON_READ_TIMEOUT] = "Read Timeout",
+	[PANIC_REASON_CMD_TIMEOUT] = "Command Timeout",
+	[PANIC_REASON_MMU_FAULT] = "MMU Page Fault"
+};
+
+/*
+ * Maximum length of the panic reason string.
+ */
+#define PANIC_REASON_LEN  (255)
+
+static struct vdeckm_context acore_ctx[VXD_MAX_CORES] = {0};
+
+static int vdeckm_getregsoffsets(const void *hndl_vxd,
+				 struct decoder_regsoffsets *regs_offsets)
+{
+	struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+
+	if (!core_ctx)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	regs_offsets->vdmc_cmd_offset = MSVDX_CMD_OFFSET;
+	regs_offsets->vec_offset = MSVDX_VEC_OFFSET;
+	regs_offsets->entropy_offset = PVDEC_ENTROPY_OFFSET;
+	regs_offsets->vec_be_regs_offset = PVDEC_VEC_BE_OFFSET;
+	regs_offsets->vdec_be_codec_regs_offset = PVDEC_VEC_BE_CODEC_OFFSET;
+
+	return IMG_SUCCESS;
+}
+
+static int vdeckm_send_message(const void *hndl_vxd,
+			       struct hwctrl_to_kernel_msg *to_kernelmsg,
+			       void *vxd_dec_ctx)
+{
+	struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+	unsigned int count = 0;
+	unsigned int *msg;
+
+	if (!core_ctx || !to_kernelmsg)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	msg = kzalloc(VXD_SIZE_MSG_BUFFER, GFP_KERNEL);
+	if (!msg)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	if (!to_kernelmsg->msg_hdr) {
+		kfree(msg);
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	msg[count++] = to_kernelmsg->flags;
+	msg[count++] = to_kernelmsg->msg_size;
+
+	memcpy(&msg[count], to_kernelmsg->msg_hdr, to_kernelmsg->msg_size);
+
+	core_ctx->reader_active = 1;
+
+	pr_debug("[HWCTRL] adding message to vxd queue\n");
+	vxd_send_msg(vxd_dec_ctx, (struct vxd_fw_msg *)msg);
+
+	kfree(msg);
+
+	return 0;
+}
+
+static void vdeckm_return_msg(const void *hndl_vxd,
+			      struct hwctrl_to_kernel_msg *to_kernelmsg)
+{
+	if (to_kernelmsg)
+		kfree(to_kernelmsg->msg_hdr);
+}
+
+static int vdeckm_handle_mtxtohost_msg(unsigned int *msg, struct lst_t *pend_pict_list,
+				       enum vxd_msg_attr *msg_attr,
+				       struct dec_decpict  **decpict,
+				       unsigned char msg_type,
+				       unsigned int trans_id)
+{
+	struct dec_decpict *pdec_pict;
+
+	switch (msg_type) {
+	case FW_DEVA_COMPLETED:
+	{
+		struct dec_pict_attrs *pict_attrs = NULL;
+		unsigned short error_flags = 0;
+		unsigned int no_bewdts = 0;
+		unsigned int mbs_dropped = 0;
+		unsigned int mbs_recovered = 0;
+		unsigned char flag = 0;
+
+		pr_debug("Received message from firmware\n");
+		error_flags = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_ERROR_FLAGS);
+
+		no_bewdts = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_BEWDTS);
+
+		mbs_dropped = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_MBSDROPPED);
+
+		mbs_recovered = MEMIO_READ_FIELD(msg, FW_DEVA_COMPLETED_NUM_MBSRECOVERED);
+
+		pdec_pict = lst_first(pend_pict_list);
+		while (pdec_pict) {
+			if (pdec_pict->transaction_id == trans_id)
+				break;
+			pdec_pict = lst_next(pdec_pict);
+		}
+		/*
+		 * We must have a picture in the list that matches
+		 * the transaction id
+		 */
+		if (!pdec_pict)
+			return IMG_ERROR_FATAL;
+
+		if (!(pdec_pict->first_fld_fwmsg) || !(pdec_pict->second_fld_fwmsg))
+			return IMG_ERROR_FATAL;
+
+		flag = pdec_pict->first_fld_fwmsg->pict_attrs.first_fld_rcvd;
+		if (flag) {
+			pict_attrs = &pdec_pict->second_fld_fwmsg->pict_attrs;
+		} else {
+			pict_attrs = &pdec_pict->first_fld_fwmsg->pict_attrs;
+			flag = 1;
+		}
+
+		pict_attrs->fe_err = (unsigned int)error_flags;
+		pict_attrs->no_be_wdt = no_bewdts;
+		pict_attrs->mbs_dropped = mbs_dropped;
+		pict_attrs->mbs_recovered = mbs_recovered;
+		/*
+		 * We may have successfully replayed the picture,
+		 * so reset the error flags
+		 */
+		pict_attrs->pict_attrs.dwrfired = 0;
+		pict_attrs->pict_attrs.mmufault = 0;
+		pict_attrs->pict_attrs.deverror = 0;
+
+		*msg_attr = VXD_MSG_ATTR_DECODED;
+		*decpict = pdec_pict;
+		break;
+	}
+
+	case FW_DEVA_PANIC:
+	{
+		unsigned int panic_info = MEMIO_READ_FIELD(msg, FW_DEVA_PANIC_ERROR_INT);
+		unsigned char panic_reason[PANIC_REASON_LEN] = "Reason(s): ";
+		unsigned char is_panic_reason_identified = 0;
+		/*
+		 * Create the panic reason string. Each matching reason is
+		 * appended with a ", " separator; the trailing separator is
+		 * stripped at the end.
+		 */
+		if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+				     CR_HOST_SYS_WDT)) {
+			strncat(panic_reason, apanic_reason[PANIC_REASON_WDT],
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			strncat(panic_reason, ", ",
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			is_panic_reason_identified = 1;
+		}
+		if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+				     CR_HOST_READ_TIMEOUT_PROC_IRQ)) {
+			strncat(panic_reason, apanic_reason[PANIC_REASON_READ_TIMEOUT],
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			strncat(panic_reason, ", ",
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			is_panic_reason_identified = 1;
+		}
+		if (REGIO_READ_FIELD(panic_info, PVDEC_CORE, CR_PVDEC_HOST_INTERRUPT_STATUS,
+				     CR_HOST_COMMAND_TIMEOUT_PROC_IRQ)) {
+			strncat(panic_reason, apanic_reason[PANIC_REASON_CMD_TIMEOUT],
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			strncat(panic_reason, ", ",
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			is_panic_reason_identified = 1;
+		}
+		if (!is_panic_reason_identified) {
+			strncat(panic_reason, apanic_reason[PANIC_REASON_OTHER],
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+			strncat(panic_reason, ", ",
+				PANIC_REASON_LEN - strlen(panic_reason) - 1);
+		}
+		/* Strip the trailing ", " separator */
+		panic_reason[strlen(panic_reason) - 2] = 0;
+		if (trans_id != 0)
+			pr_err("TID=0x%08X [FIRMWARE PANIC %s]\n", trans_id, panic_reason);
+		else
+			pr_err("TID=NULL [GENERAL FIRMWARE PANIC %s]\n", panic_reason);
+
+		break;
+	}
+
+	case FW_ASSERT:
+	{
+		unsigned int fwfile_namehash = MEMIO_READ_FIELD(msg, FW_ASSERT_FILE_NAME_HASH);
+		unsigned int fwfile_line = MEMIO_READ_FIELD(msg, FW_ASSERT_FILE_LINE);
+
+		pr_err("ASSERT file name hash:0x%08X line number:%d\n",
+		       fwfile_namehash, fwfile_line);
+		break;
+	}
+
+	case FW_SO:
+	{
+		unsigned int task_name = MEMIO_READ_FIELD(msg, FW_SO_TASK_NAME);
+		unsigned char sztaskname[sizeof(unsigned int) + 1];
+
+		sztaskname[0] = task_name >> 24;
+		sztaskname[1] = (task_name >> 16) & 0xff;
+		sztaskname[2] = (task_name >> 8) & 0xff;
+		sztaskname[3] = task_name & 0xff;
+		if (sztaskname[3] != 0)
+			sztaskname[4] = 0;
+		pr_warn("STACK OVERFLOW for %s task\n", sztaskname);
+		break;
+	}
+
+	case FW_VXD_EMPTY_COMPL:
+		/*
+		 * Empty completion message sent as response to init,
+		 * configure etc The architecture of vxd.ko module
+		 * requires the firmware to send a reply for every
+		 * message submitted by the user space.
+		 */
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int vdeckm_handle_hosttomtx_msg(unsigned int *msg, struct lst_t *pend_pict_list,
+				       enum vxd_msg_attr *msg_attr,
+				       struct dec_decpict  **decpict,
+				       unsigned char msg_type,
+				       unsigned int trans_id,
+				       unsigned int msg_flags)
+{
+	struct dec_decpict *pdec_pict;
+
+	pr_debug("Received message from HOST\n");
+
+	switch (msg_type) {
+	case FW_DEVA_PARSE:
+	{
+		struct dec_pict_attrs *pict_attrs = NULL;
+		unsigned char flag = 0;
+
+		pdec_pict = lst_first(pend_pict_list);
+		while (pdec_pict) {
+			if (pdec_pict->transaction_id == trans_id)
+				break;
+
+			pdec_pict = lst_next(pdec_pict);
+		}
+
+		/*
+		 * We must have a picture in the list that matches
+		 * the transaction id
+		 */
+		if (!pdec_pict) {
+			pr_err("Firmware decoded message received with no pending picture\n");
+			return IMG_ERROR_FATAL;
+		}
+
+		if (!(pdec_pict->first_fld_fwmsg) || !(pdec_pict->second_fld_fwmsg)) {
+			pr_err("invalid pending picture struct\n");
+			return IMG_ERROR_FATAL;
+		}
+
+		flag = pdec_pict->first_fld_fwmsg->pict_attrs.first_fld_rcvd;
+		if (flag) {
+			pict_attrs = &pdec_pict->second_fld_fwmsg->pict_attrs;
+		} else {
+			pict_attrs = &pdec_pict->first_fld_fwmsg->pict_attrs;
+			flag = 1;
+		}
+
+		/*
+		 * The below info is fetched from firmware state
+		 * afterwards, so just set this to zero for now.
+		 */
+		pict_attrs->fe_err = 0;
+		pict_attrs->no_be_wdt = 0;
+		pict_attrs->mbs_dropped = 0;
+		pict_attrs->mbs_recovered = 0;
+
+		vxd_get_pictattrs(msg_flags, &pict_attrs->pict_attrs);
+		vxd_get_msgerrattr(msg_flags, msg_attr);
+
+		if (*msg_attr == VXD_MSG_ATTR_FATAL)
+			pr_err("[TID=0x%08X] [DECODE_FAILED]\n", trans_id);
+		if (*msg_attr == VXD_MSG_ATTR_CANCELED)
+			pr_err("[TID=0x%08X] [DECODE_CANCELED]\n", trans_id);
+
+		*decpict = pdec_pict;
+		break;
+	}
+
+	case FW_DEVA_PARSE_FRAGMENT:
+		/*
+		 * Do nothing - Picture holds the list of fragments.
+		 * So, in case of any error those would be replayed
+		 * anyway.
+		 */
+		break;
+	default:
+		pr_warn("Unknown message received 0x%02x\n", msg_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int vdeckm_process_msg(const void *hndl_vxd, unsigned int *msg,
+			      struct lst_t *pend_pict_list,
+			      unsigned int msg_flags,
+			      enum vxd_msg_attr *msg_attr,
+			      struct dec_decpict  **decpict)
+{
+	struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+	unsigned char msg_type;
+	unsigned char msg_group;
+	unsigned int trans_id = 0;
+	struct vdec_pict_hwcrc *pict_hwcrc = NULL;
+	struct dec_decpict *pdec_pict;
+
+	if (!core_ctx || !msg || !msg_attr || !pend_pict_list || !decpict)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	*msg_attr = VXD_MSG_ATTR_NONE;
+	*decpict = NULL;
+
+	trans_id = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_TRANS_ID);
+	msg_type  = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_MSG_TYPE);
+	msg_group = msg_type & MSG_GROUP_MASK;
+
+	switch (msg_group) {
+	case MSG_TYPE_START_PSR_MTXHOST_MSG:
+		vdeckm_handle_mtxtohost_msg(msg, pend_pict_list, msg_attr,
+					    decpict, msg_type, trans_id);
+		break;
+	/*
+	 * Picture decode has been returned as unprocessed.
+	 * Locate the picture with the corresponding TID and mark
+	 * it as decoded with errors.
+	 */
+	case MSG_TYPE_START_PSR_HOSTMTX_MSG:
+		vdeckm_handle_hosttomtx_msg(msg, pend_pict_list, msg_attr,
+					    decpict, msg_type, trans_id,
+					    msg_flags);
+		break;
+
+	case FW_DEVA_SIGNATURES_HEVC:
+	case FW_DEVA_SIGNATURES_LEGACY:
+	{
+		unsigned int *signatures = msg + (FW_DEVA_SIGNATURES_SIGNATURES_OFFSET /
+				sizeof(unsigned int));
+		unsigned char sigcount  = MEMIO_READ_FIELD(msg, FW_DEVA_SIGNATURES_MSG_SIZE) -
+			((FW_DEVA_SIGNATURES_SIZE / sizeof(unsigned int)) - 1);
+		unsigned int selected = MEMIO_READ_FIELD(msg, FW_DEVA_SIGNATURES_SIGNATURE_SELECT);
+		unsigned char i, j = 0;
+
+		pdec_pict = lst_first(pend_pict_list);
+		while (pdec_pict) {
+			if (pdec_pict->transaction_id == trans_id)
+				break;
+			pdec_pict = lst_next(pdec_pict);
+		}
+
+		/* We must have a picture in the list that matches the tid */
+		VDEC_ASSERT(pdec_pict);
+		if (!pdec_pict) {
+			pr_err("Firmware signatures message received with no pending picture\n");
+			return IMG_ERROR_FATAL;
+		}
+
+		VDEC_ASSERT(pdec_pict->first_fld_fwmsg);
+		VDEC_ASSERT(pdec_pict->second_fld_fwmsg);
+		if (!pdec_pict->first_fld_fwmsg || !pdec_pict->second_fld_fwmsg) {
+			pr_err("Invalid pending picture struct\n");
+			return IMG_ERROR_FATAL;
+		}
+		if (pdec_pict->first_fld_fwmsg->pict_hwcrc.first_fld_rcvd) {
+			pict_hwcrc = &pdec_pict->second_fld_fwmsg->pict_hwcrc;
+		} else {
+			pict_hwcrc = &pdec_pict->first_fld_fwmsg->pict_hwcrc;
+			if (selected & (PVDEC_SIGNATURE_GROUP_20 | PVDEC_SIGNATURE_GROUP_24))
+				pdec_pict->first_fld_fwmsg->pict_hwcrc.first_fld_rcvd = TRUE;
+		}
+
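+		/*
+		 * Walk the 32-bit signature selection mask; signature words
+		 * are packed consecutively in the message in ascending
+		 * group-bit order.
+		 */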
+		for (i = 0; i < 32; i++) {
+			unsigned int group = selected & (1 << i);
+
+			switch (group) {
+			case PVDEC_SIGNATURE_GROUP_20:
+				pict_hwcrc->crc_vdmc_pix_recon = signatures[j++];
+				break;
+
+			case PVDEC_SIGNATURE_GROUP_24:
+				pict_hwcrc->vdeb_sysmem_wrdata = signatures[j++];
+				break;
+
+			default:
+				break;
+			}
+		}
+
+		/* sanity check */
+		sigcount -= j;
+		VDEC_ASSERT(sigcount == 0);
+
+		/*
+		 * suppress PVDEC_SIGNATURE_GROUP_1 and notify
+		 * only about groups used for verification
+		 */
+#ifdef DEBUG_DECODER_DRIVER
+		if (selected & (PVDEC_SIGNATURE_GROUP_20 | PVDEC_SIGNATURE_GROUP_24))
+			pr_info("[TID=0x%08X] [SIGNATURES]\n", trans_id);
+#endif
+
+		*decpict = pdec_pict;
+
+		break;
+	}
+
+	default: {
+#ifdef DEBUG_DECODER_DRIVER
+		unsigned short msg_size, i;
+
+		pr_warn("Unknown message type received: 0x%x\n", msg_type);
+
+		msg_size = MEMIO_READ_FIELD(msg, FW_DEVA_GENMSG_MSG_SIZE);
+
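+		/* MSG_SIZE is in 32-bit words; dump the payload one word per line. */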
+		for (i = 0; i < msg_size; i++)
+			pr_info("0x%04x: 0x%08x\n", i, msg[i]);
+#endif
+		break;
+	}
+	}
+
+	return 0;
+}
+
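+/*
+ * Copy between VEC local RAM (VLR) and host memory using whole 32-bit
+ * word accesses; any trailing bytes beyond a multiple of 4 are not copied.
+ */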
+static void vdeckm_vlr_copy(void *dst, void *src, unsigned int size)
+{
+	unsigned int *pdst = (unsigned int *)dst;
+	unsigned int *psrc = (unsigned int *)src;
+
+	size /= 4;
+	while (size--)
+		*pdst++ = *psrc++;
+}
+
+static int vdeckm_get_core_state(const void *hndl_vxd, struct vxd_states *state)
+{
+	struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+	struct vdecfw_pvdecfirmwarestate firmware_state;
+	unsigned char pipe = 0;
+
+#ifdef ERROR_RECOVERY_SIMULATION
+	/*
+	 * If disable_fw_irq_value is non-zero, return an error: the
+	 * interrupt has been deliberately ignored, so dereferencing
+	 * comms_ram_addr below would crash the kernel.
+	 */
+	if (disable_fw_irq_value != 0)
+		return IMG_ERROR_INVALID_PARAMETERS;
+#endif
+
+	if (!core_ctx || !state)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/*
+	 * If state is requested for the first time.
+	 */
+	if (core_ctx->state_size == 0) {
+		unsigned int regval;
+		/*
+		 * get the state buffer info.
+		 */
+		regval = *((unsigned int *)core_ctx->comms_ram_addr +
+			(PVDEC_COM_RAM_STATE_BUF_SIZE_AND_OFFSET_OFFSET / sizeof(unsigned int)));
+		core_ctx->state_size = PVDEC_COM_RAM_BUF_GET_SIZE(regval, STATE);
+		core_ctx->state_offset = PVDEC_COM_RAM_BUF_GET_OFFSET(regval, STATE);
+	}
+
+	/*
+	 * If state buffer is available.
+	 */
+	if (core_ctx->state_size) {
+		/*
+		 * Determine the latest transaction to have passed each
+		 * checkpoint in the firmware.
+		 * Read the firmware state from VEC Local RAM
+		 */
+		vdeckm_vlr_copy(&firmware_state, (unsigned char *)core_ctx->comms_ram_addr +
+				core_ctx->state_offset, core_ctx->state_size);
+
+		for (pipe = 0; pipe < core_ctx->props.num_pixel_pipes; pipe++) {
+			/*
+			 * Set pipe presence.
+			 */
+			state->fw_state.pipe_state[pipe].is_pipe_present = 1;
+
+			/*
+			 * For checkpoints copy message ids here. These will
+			 * be translated into transaction ids later.
+			 */
+			memcpy(state->fw_state.pipe_state[pipe].acheck_point,
+			       firmware_state.pipestate[pipe].check_point,
+				sizeof(state->fw_state.pipe_state[pipe].acheck_point));
+			state->fw_state.pipe_state[pipe].firmware_action  =
+				firmware_state.pipestate[pipe].firmware_action;
+			state->fw_state.pipe_state[pipe].cur_codec =
+				firmware_state.pipestate[pipe].curr_codec;
+			state->fw_state.pipe_state[pipe].fe_slices =
+				firmware_state.pipestate[pipe].fe_slices;
+			state->fw_state.pipe_state[pipe].be_slices =
+				firmware_state.pipestate[pipe].be_slices;
+			state->fw_state.pipe_state[pipe].fe_errored_slices =
+				firmware_state.pipestate[pipe].fe_errored_slices;
+			state->fw_state.pipe_state[pipe].be_errored_slices =
+				firmware_state.pipestate[pipe].be_errored_slices;
+			state->fw_state.pipe_state[pipe].be_mbs_dropped =
+				firmware_state.pipestate[pipe].be_mbs_dropped;
+			state->fw_state.pipe_state[pipe].be_mbs_recovered =
+				firmware_state.pipestate[pipe].be_mbs_recovered;
+			state->fw_state.pipe_state[pipe].fe_mb.x =
+				firmware_state.pipestate[pipe].last_fe_mb_xy & 0xFF;
+			state->fw_state.pipe_state[pipe].fe_mb.y =
+				(firmware_state.pipestate[pipe].last_fe_mb_xy >> 16) & 0xFF;
+			state->fw_state.pipe_state[pipe].be_mb.x =
+				REGIO_READ_FIELD(firmware_state.pipestate[pipe].last_be_mb_xy,
+						 MSVDX_VDMC,
+						 CR_VDMC_MACROBLOCK_NUMBER,
+						 CR_VDMC_MACROBLOCK_X_OFFSET);
+			state->fw_state.pipe_state[pipe].be_mb.y =
+				REGIO_READ_FIELD(firmware_state.pipestate[pipe].last_be_mb_xy,
+						 MSVDX_VDMC,
+						 CR_VDMC_MACROBLOCK_NUMBER,
+						 CR_VDMC_MACROBLOCK_Y_OFFSET);
+		}
+	}
+
+	return 0;
+}
+
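+/*
+ * Build a FW_DEVA_PARSE batch message for the firmware. On success the
+ * caller receives the kzalloc'd message buffer through *msg.
+ */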
+static int vdeckm_prepare_batch(struct vdeckm_context *core_ctx,
+				const struct hwctrl_batch_msgdata *batch_msgdata,
+				unsigned char **msg)
+{
+	unsigned char vdec_flags = 0;
+	unsigned short flags = 0;
+	unsigned char *pmsg = kzalloc(FW_DEVA_DECODE_SIZE, GFP_KERNEL);
+	struct vidio_ddbufinfo *pbatch_msg_bufinfo = batch_msgdata->batchmsg_bufinfo;
+
+	if (!pmsg)
+		return IMG_ERROR_MALLOC_FAILED;
+
+	if (batch_msgdata->size_delimited_mode)
+		vdec_flags |= FW_VDEC_NAL_SIZE_DELIM;
+
+	flags |= FW_DEVA_RENDER_HOST_INT;
+
+	/*
+	 * Message type and stream ID
+	 */
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_TYPE, FW_DEVA_PARSE, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_CTRL_ALLOC_ADDR,
+			  (unsigned int)pbatch_msg_bufinfo->dev_virt, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_BUFFER_SIZE,
+			  batch_msgdata->ctrl_alloc_bytes / sizeof(unsigned int), unsigned char*);
+
+	/*
+	 * Operating mode and decode flags
+	 */
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_OPERATING_MODE, batch_msgdata->operating_mode,
+			  unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FLAGS, flags, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_VDEC_FLAGS, vdec_flags, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_GENC_ID, batch_msgdata->genc_id, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MB_LOAD, batch_msgdata->mb_load, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_STREAMID,
+			  GET_STREAM_ID(batch_msgdata->transaction_id), unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_EXT_STATE_BUFFER,
+			  (unsigned int)batch_msgdata->pvdec_fwctx->dev_virt, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MSG_ID, ++core_ctx->current_msgid,
+			  unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_TRANS_ID, batch_msgdata->transaction_id,
+			  unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_TILE_CFG, batch_msgdata->tile_cfg, unsigned char*);
+
+	/*
+	 * size of message
+	 */
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_SIZE,
+			  FW_DEVA_DECODE_SIZE / sizeof(unsigned int), unsigned char*);
+
+	*msg = pmsg;
+
+	return 0;
+}
+
+static int vdeckm_prepare_fragment(struct vdeckm_context *core_ctx,
+				   const struct hwctrl_fragment_msgdata
+				   *fragment_msgdata,
+				   unsigned char **msg)
+{
+	struct vidio_ddbufinfo *pbatch_msg_bufinfo = NULL;
+	unsigned char *pmsg = NULL;
+
+	pbatch_msg_bufinfo = fragment_msgdata->batchmsg_bufinfo;
+
+	if (!(fragment_msgdata->batchmsg_bufinfo)) {
+		pr_err("Batch message info missing!\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	pmsg = kzalloc(FW_DEVA_DECODE_FRAGMENT_SIZE, GFP_KERNEL);
+	if (!pmsg)
+		return IMG_ERROR_MALLOC_FAILED;
+	/*
+	 * message type and stream id
+	 */
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_TYPE,
+			  FW_DEVA_PARSE_FRAGMENT, unsigned char*);
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_MSG_ID, ++core_ctx->current_msgid, unsigned char*);
+
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FRAGMENT_CTRL_ALLOC_ADDR,
+			  (unsigned int)pbatch_msg_bufinfo->dev_virt
+			  + fragment_msgdata->ctrl_alloc_offset, unsigned char*);
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_DECODE_FRAGMENT_BUFFER_SIZE,
+			  fragment_msgdata->ctrl_alloc_bytes / sizeof(unsigned int),
+			  unsigned char*);
+
+	/*
+	 * size of message
+	 */
+	MEMIO_WRITE_FIELD(pmsg, FW_DEVA_GENMSG_MSG_SIZE,
+			  FW_DEVA_DECODE_FRAGMENT_SIZE / sizeof(unsigned int), unsigned char*);
+
+	*msg = pmsg;
+
+	return 0;
+}
+
+static int vdeckm_get_message(const void *hndl_vxd, const enum hwctrl_msgid msgid,
+			      const struct hwctrl_msgdata *msgdata,
+			      struct hwctrl_to_kernel_msg *to_kernelmsg)
+{
+	unsigned int result = 0;
+	struct vdeckm_context *core_ctx = (struct vdeckm_context *)hndl_vxd;
+
+	if (!core_ctx || !to_kernelmsg || !msgdata)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	switch (msgid) {
+	case HWCTRL_MSGID_BATCH:
+		result = vdeckm_prepare_batch(core_ctx, &msgdata->batch_msgdata,
+					      &to_kernelmsg->msg_hdr);
+		break;
+
+	case HWCTRL_MSGID_FRAGMENT:
+		result = vdeckm_prepare_fragment(core_ctx, &msgdata->fragment_msgdata,
+						 &to_kernelmsg->msg_hdr);
+		vxd_set_msgflag(VXD_MSG_FLAG_DROP, &to_kernelmsg->flags);
+		break;
+
+	default:
+		result = IMG_ERROR_GENERIC_FAILURE;
+		pr_err("Got a message type that is not supported by PVDEC\n");
+		break;
+	}
+
+	if (result == 0) {
+		/* Set the stream ID for the next message to be sent. */
+		to_kernelmsg->km_str_id = msgdata->km_str_id;
+		to_kernelmsg->msg_size = MEMIO_READ_FIELD(to_kernelmsg->msg_hdr,
+							  FW_DEVA_GENMSG_MSG_SIZE) *
+							  sizeof(unsigned int);
+	}
+
+	return result;
+}
+
+static void hwctrl_dump_state(struct vxd_states *prev_state,
+			      struct vxd_states *cur_state,
+			      unsigned char pipe_minus1)
+{
+	pr_info("Back-End MbX                          [% 10d]",
+		prev_state->fw_state.pipe_state[pipe_minus1].be_mb.x);
+	pr_info("Back-End MbY                          [% 10d]",
+		prev_state->fw_state.pipe_state[pipe_minus1].be_mb.y);
+	pr_info("Front-End MbX                         [% 10d]",
+		prev_state->fw_state.pipe_state[pipe_minus1].fe_mb.x);
+	pr_info("Front-End MbY                         [% 10d]",
+		prev_state->fw_state.pipe_state[pipe_minus1].fe_mb.y);
+	pr_info("VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_BE_PICTURE_COMPLETE]);
+	pr_info("VDECFW_CHECKPOINT_BE_1SLICE_DONE      [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_BE_1SLICE_DONE]);
+	pr_info("VDECFW_CHECKPOINT_BE_PICTURE_STARTED  [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_BE_PICTURE_STARTED]);
+	pr_info("VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_FE_PICTURE_COMPLETE]);
+	pr_info("VDECFW_CHECKPOINT_FE_PARSE_DONE       [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_FE_PARSE_DONE]);
+	pr_info("VDECFW_CHECKPOINT_FE_1SLICE_DONE      [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_FE_1SLICE_DONE]);
+	pr_info("VDECFW_CHECKPOINT_ENTDEC_STARTED      [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_ENTDEC_STARTED]);
+	pr_info("VDECFW_CHECKPOINT_FIRMWARE_SAVED      [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_FIRMWARE_SAVED]);
+	pr_info("VDECFW_CHECKPOINT_PICMAN_COMPLETE     [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_PICMAN_COMPLETE]);
+	pr_info("VDECFW_CHECKPOINT_FIRMWARE_READY      [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_FIRMWARE_READY]);
+	pr_info("VDECFW_CHECKPOINT_PICTURE_STARTED     [0x%08X]",
+		cur_state->fw_state.pipe_state[pipe_minus1].acheck_point
+		[VDECFW_CHECKPOINT_PICTURE_STARTED]);
+}
+
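+/* Picture load in macroblocks: width and height rounded up to whole 16x16 MBs. */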
+static unsigned int hwctrl_calculate_load(struct bspp_pict_hdr_info *pict_hdr_info)
+{
+	return (((pict_hdr_info->coded_frame_size.width + 15) / 16)
+	       * ((pict_hdr_info->coded_frame_size.height + 15) / 16));
+}
+
+static int hwctrl_send_batch_message(struct hwctrl_ctx *hwctx,
+				     struct dec_decpict *decpict,
+				     void *vxd_dec_ctx)
+{
+	int result;
+	struct hwctrl_to_kernel_msg to_kernelmsg = {0};
+	struct vidio_ddbufinfo *batchmsg_bufinfo =
+		decpict->batch_msginfo->ddbuf_info;
+	struct hwctrl_msgdata msg_data;
+	struct hwctrl_batch_msgdata *batch_msgdata = &msg_data.batch_msgdata;
+
+	memset(&msg_data, 0, sizeof(msg_data));
+
+	msg_data.km_str_id = GET_STREAM_ID(decpict->transaction_id);
+
+	batch_msgdata->batchmsg_bufinfo  = batchmsg_bufinfo;
+
+	batch_msgdata->transaction_id    = decpict->transaction_id;
+	batch_msgdata->pvdec_fwctx       = decpict->str_pvdec_fw_ctxbuf;
+	batch_msgdata->ctrl_alloc_bytes  = decpict->ctrl_alloc_bytes;
+	batch_msgdata->operating_mode    = decpict->operating_op;
+	batch_msgdata->genc_id           = decpict->genc_id;
+	batch_msgdata->mb_load           = hwctrl_calculate_load(decpict->pict_hdr_info);
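+	/* Anything other than start-code-only (SCP) parsing is size-delimited. */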
+	batch_msgdata->size_delimited_mode =
+		(decpict->pict_hdr_info->parser_mode != VDECFW_SCP_ONLY) ? 1 : 0;
+
+	result = vdeckm_get_message(hwctx->hndl_vxd, HWCTRL_MSGID_BATCH,
+				    &msg_data, &to_kernelmsg);
+	if (result != 0) {
+		pr_err("failed to get decode message\n");
+		return result;
+	}
+
+	pr_debug("[HWCTRL] send batch message\n");
+	result = vdeckm_send_message(hwctx->hndl_vxd, &to_kernelmsg,
+				     vxd_dec_ctx);
+	if (result != 0)
+		return result;
+
+	vdeckm_return_msg(hwctx->hndl_vxd, &to_kernelmsg);
+
+	return 0;
+}
+
+int hwctrl_process_msg(void *hndl_hwctx, unsigned int msg_flags, unsigned int *msg,
+		       struct dec_decpict **decpict)
+{
+	int result;
+	struct hwctrl_ctx *hwctx;
+	enum vxd_msg_attr msg_attr = VXD_MSG_ATTR_NONE;
+	struct dec_decpict *pdecpict = NULL;
+	unsigned int val_first = 0;
+	unsigned int val_sec = 0;
+
+	if (!hndl_hwctx || !msg || !decpict) {
+		VDEC_ASSERT(0);
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	*decpict = NULL;
+
+	pr_debug("[HWCTRL] : process message\n");
+	result = vdeckm_process_msg(hwctx->hndl_vxd, msg, &hwctx->pend_pict_list, msg_flags,
+				    &msg_attr, &pdecpict);
+
+	/* validate pointers before using them */
+	if (!pdecpict || !pdecpict->first_fld_fwmsg || !pdecpict->second_fld_fwmsg) {
+		VDEC_ASSERT(0);
+		return IMG_ERROR_FATAL;
+	}
+
+	val_first = pdecpict->first_fld_fwmsg->pict_attrs.pict_attrs.deverror;
+	val_sec = pdecpict->second_fld_fwmsg->pict_attrs.pict_attrs.deverror;
+
+	if (val_first || val_sec)
+		pr_err("device signaled a critical error\n");
+
+	if (msg_attr == VXD_MSG_ATTR_DECODED) {
+		pdecpict->state = DECODER_PICTURE_STATE_DECODED;
+		/*
+		 * We have successfully decoded a picture, either normally
+		 * or after a replay.
+		 * Mark the HW as being in a good state.
+		 */
+		hwctx->is_fatal_state = 0;
+	} else if (msg_attr == VXD_MSG_ATTR_FATAL) {
+		struct hwctrl_state state;
+		unsigned char pipe_minus1 = 0;
+
+		memset(&state, 0, sizeof(state));
+
+		result = hwctrl_get_core_status(hwctx, &state);
+		if (result == 0) {
+			hwctx->is_prev_hw_state_set = 1;
+			memcpy(&hwctx->prev_state, &state, sizeof(struct hwctrl_state));
+
+			for (pipe_minus1 = 0; pipe_minus1 < hwctx->num_pipes;
+				pipe_minus1++) {
+				hwctrl_dump_state(&state.core_state, &state.core_state,
+						  pipe_minus1);
+			}
+		}
+	}
+	*decpict = pdecpict;
+
+	return 0;
+}
+
+int hwctrl_getcore_cached_status(void *hndl_hwctx, struct hwctrl_state *state)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx->is_prev_hw_state_set)
+		memcpy(state, &hwctx->prev_state, sizeof(struct hwctrl_state));
+	else
+		return IMG_ERROR_UNEXPECTED_STATE;
+
+	return 0;
+}
+
+int hwctrl_get_core_status(void *hndl_hwctx, struct hwctrl_state *state)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+	unsigned int result = IMG_ERROR_GENERIC_FAILURE;
+
+	if (!hwctx->is_fatal_state && state) {
+		struct vxd_states *pcorestate = NULL;
+
+		pcorestate  = &state->core_state;
+
+		memset(pcorestate, 0, sizeof(*(pcorestate)));
+
+		result = vdeckm_get_core_state(hwctx->hndl_vxd, pcorestate);
+	}
+
+	return result;
+}
+
+int hwctrl_is_on_seq_replay(void *hndl_hwctx)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	return hwctx->is_on_seq_replay;
+}
+
+int hwctrl_picture_submitbatch(void *hndl_hwctx, struct dec_decpict  *decpict, void *vxd_dec_ctx)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx->is_initialised) {
+		lst_add(&hwctx->pend_pict_list, decpict);
+		if (!hwctx->is_on_seq_replay)
+			return hwctrl_send_batch_message(hwctx, decpict, vxd_dec_ctx);
+	}
+
+	return 0;
+}
+
+int hwctrl_getpicpend_pictlist(void *hndl_hwctx, unsigned int transaction_id,
+			       struct dec_decpict  **decpict)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+	struct dec_decpict  *dec_pic;
+
+	dec_pic = lst_first(&hwctx->pend_pict_list);
+	while (dec_pic) {
+		if (dec_pic->transaction_id == transaction_id) {
+			*decpict = dec_pic;
+			break;
+		}
+		dec_pic = lst_next(dec_pic);
+	}
+
+	if (!dec_pic)
+		return IMG_ERROR_INVALID_ID;
+
+	return 0;
+}
+
+int hwctrl_peekheadpiclist(void *hndl_hwctx, struct dec_decpict **decpict)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx) {
+		*decpict = lst_first(&hwctx->pend_pict_list);
+		if (*decpict)
+			return 0;
+	}
+
+	return IMG_ERROR_GENERIC_FAILURE;
+}
+
+int hwctrl_getdecodedpicture(void *hndl_hwctx, struct dec_decpict **decpict)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx) {
+		struct dec_decpict *cur_decpict;
+		/*
+		 * Ensure that this picture is in the list.
+		 */
+		cur_decpict = lst_first(&hwctx->pend_pict_list);
+		while (cur_decpict) {
+			if (cur_decpict->state == DECODER_PICTURE_STATE_DECODED) {
+				*decpict = cur_decpict;
+				return 0;
+			}
+
+			cur_decpict = lst_next(cur_decpict);
+		}
+	}
+
+	return IMG_ERROR_VALUE_OUT_OF_RANGE;
+}
+
+void hwctrl_removefrom_piclist(void *hndl_hwctx, struct dec_decpict  *decpict)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx) {
+		struct dec_decpict *cur_decpict;
+		/*
+		 * Ensure that this picture is in the list.
+		 */
+		cur_decpict = lst_first(&hwctx->pend_pict_list);
+		while (cur_decpict) {
+			if (cur_decpict == decpict) {
+				lst_remove(&hwctx->pend_pict_list, decpict);
+				break;
+			}
+
+			cur_decpict = lst_next(cur_decpict);
+		}
+	}
+}
+
+int hwctrl_getregsoffset(void *hndl_hwctx, struct decoder_regsoffsets *regs_offsets)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	return vdeckm_getregsoffsets(hwctx->hndl_vxd, regs_offsets);
+}
+
+static int pvdec_create(struct vxd_dev *vxd, struct vxd_coreprops *core_props,
+			void **hndl_vdeckm_context)
+{
+	struct vdeckm_context  *corectx;
+	struct vxd_core_props hndl_core_props;
+	int result;
+
+	if (!hndl_vdeckm_context || !core_props)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/*
+	 * Obtain the core context. Only a single VXD core is
+	 * supported, so always use entry 0.
+	 */
+	corectx = &acore_ctx[0];
+
+	memset(corectx, 0, sizeof(*corectx));
+
+	corectx->core_num = 0;
+
+	result = vxd_pvdec_get_props(vxd->dev, vxd->reg_base, &hndl_core_props);
+	if (result != 0)
+		return result;
+
+	vxd_get_coreproperties(&hndl_core_props, &corectx->props);
+
+	memcpy(core_props, &corectx->props, sizeof(*core_props));
+
+	*hndl_vdeckm_context = corectx;
+
+	return 0;
+}
+
+int hwctrl_deinitialise(void *hndl_hwctx)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+
+	if (hwctx && hwctx->is_initialised)
+		kfree(hwctx);
+
+	return 0;
+}
+
+int hwctrl_initialise(void *dec_core, void *comp_int_userdata,
+		      const struct vdecdd_dd_devconfig  *dd_devconfig,
+		      struct vxd_coreprops *core_props, void **hndl_hwctx)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)*hndl_hwctx;
+	int result;
+
+	if (!hwctx) {
+		hwctx = kzalloc(sizeof(*(hwctx)), GFP_KERNEL);
+		if (!hwctx)
+			return IMG_ERROR_OUT_OF_MEMORY;
+
+		*hndl_hwctx = hwctx;
+	}
+
+	if (!hwctx->is_initialised) {
+		hwctx->hndl_vxd = ((struct dec_core_ctx *)dec_core)->dec_ctx->dev_handle;
+		result = pvdec_create(hwctx->hndl_vxd, core_props, &hwctx->hndl_vxd);
+		if (result != 0)
+			goto error;
+
+		lst_init(&hwctx->pend_pict_list);
+
+		hwctx->devconfig = *dd_devconfig;
+		hwctx->num_pipes = core_props->num_pixel_pipes;
+		hwctx->comp_init_userdata = comp_int_userdata;
+		hwctx->dec_core = dec_core;
+		hwctx->is_initialised = 1;
+		hwctx->is_on_seq_replay = 0;
+		hwctx->is_fatal_state = 0;
+	}
+
+	return 0;
+error:
+	hwctrl_deinitialise(*hndl_hwctx);
+
+	return result;
+}
+
+static int hwctrl_send_fragment_message(struct hwctrl_ctx *hwctx,
+					struct dec_pict_fragment *pict_fragment,
+					struct dec_decpict *decpict,
+					void *vxd_dec_ctx)
+{
+	int result;
+	struct hwctrl_to_kernel_msg to_kernelmsg = {0};
+	struct hwctrl_msgdata msg_data;
+	struct hwctrl_fragment_msgdata *pfragment_msgdata =
+		&msg_data.fragment_msgdata;
+
+	memset(&msg_data, 0, sizeof(msg_data));
+
+	msg_data.km_str_id = GET_STREAM_ID(decpict->transaction_id);
+
+	pfragment_msgdata->ctrl_alloc_bytes = pict_fragment->ctrl_alloc_bytes;
+
+	pfragment_msgdata->ctrl_alloc_offset = pict_fragment->ctrl_alloc_offset;
+
+	pfragment_msgdata->batchmsg_bufinfo = decpict->batch_msginfo->ddbuf_info;
+
+	result = vdeckm_get_message(hwctx->hndl_vxd, HWCTRL_MSGID_FRAGMENT, &msg_data,
+				    &to_kernelmsg);
+	if (result != 0) {
+		pr_err("Failed to get decode message\n");
+		return result;
+	}
+
+	result = vdeckm_send_message(hwctx->hndl_vxd, &to_kernelmsg, vxd_dec_ctx);
+	if (result != 0)
+		return result;
+
+	vdeckm_return_msg(hwctx->hndl_vxd, &to_kernelmsg);
+
+	return 0;
+}
+
+int hwctrl_picture_submit_fragment(void *hndl_hwctx,
+				   struct dec_pict_fragment  *pict_fragment,
+				   struct dec_decpict *decpict,
+				   void *vxd_dec_ctx)
+{
+	struct hwctrl_ctx *hwctx = (struct hwctrl_ctx *)hndl_hwctx;
+	unsigned int result = 0;
+
+	if (hwctx->is_initialised) {
+		result = hwctrl_send_fragment_message(hwctx, pict_fragment,
+						      decpict, vxd_dec_ctx);
+		if (result != 0)
+			pr_err("Failed to send fragment message to firmware\n");
+	}
+
+	return result;
+}
diff --git a/drivers/staging/media/vxd/decoder/hw_control.h b/drivers/staging/media/vxd/decoder/hw_control.h
new file mode 100644
index 000000000000..3f430969b998
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/hw_control.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Hardware control implementation
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef _HW_CONTROL_H
+#define _HW_CONTROL_H
+
+#include "bspp.h"
+#include "decoder.h"
+#include "fw_interface.h"
+#include "img_dec_common.h"
+#include "img_errors.h"
+#include "lst.h"
+#include "mem_io.h"
+#include "vdecdd_defs.h"
+#include "vdecfw_shared.h"
+#include "vid_buf.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+
+/* Size of additional buffers needed for each HEVC picture */
+#ifdef HAS_HEVC
+
+/* Empirically defined */
+#define MEM_TO_REG_BUF_SIZE 0x2000
+
+/*
+ * Max. no. of slices found in stream db: approx. 2200,
+ * set MAX_SLICES to 2368 to get buffer size page aligned
+ */
+#define MAX_SLICES 2368
+#define SLICE_PARAMS_SIZE 64
+#define SLICE_PARAMS_BUF_SIZE (MAX_SLICES * SLICE_PARAMS_SIZE)
+
+/*
+ * Size of buffer for "above params" structure, sufficient for stream of width 8192
+ * 192 * (8192/64) == 0x6000, see "above_param_size" in TRM
+ */
+#define ABOVE_PARAMS_BUF_SIZE 0x6000
+#endif
+
+enum hwctrl_msgid {
+	HWCTRL_MSGID_BATCH     = 0,
+	HWCTRL_MSGID_FRAGMENT  = 1,
+	CORE_MSGID_MAX,
+	CORE_MSGID_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct hwctrl_to_kernel_msg {
+	unsigned int msg_size;
+	unsigned int km_str_id;
+	unsigned int flags;
+	unsigned char *msg_hdr;
+};
+
+struct hwctrl_batch_msgdata {
+	struct vidio_ddbufinfo *batchmsg_bufinfo;
+	struct vidio_ddbufinfo *pvdec_fwctx;
+	unsigned int ctrl_alloc_bytes;
+	unsigned int operating_mode;
+	unsigned int transaction_id;
+	unsigned int tile_cfg;
+	unsigned int genc_id;
+	unsigned int mb_load;
+	unsigned int size_delimited_mode;
+};
+
+struct hwctrl_fragment_msgdata {
+	struct vidio_ddbufinfo *batchmsg_bufinfo;
+	unsigned int ctrl_alloc_offset;
+	unsigned int ctrl_alloc_bytes;
+};
+
+struct hwctrl_msgdata {
+	unsigned int km_str_id;
+	struct hwctrl_batch_msgdata batch_msgdata;
+	struct hwctrl_fragment_msgdata fragment_msgdata;
+};
+
+/*
+ * This structure contains MSVDX Message information.
+ */
+struct hwctrl_msgstatus {
+	unsigned char control_fence_id[VDECFW_MSGID_CONTROL_TYPES];
+	unsigned char decode_fence_id[VDECFW_MSGID_DECODE_TYPES];
+	unsigned char completion_fence_id[VDECFW_MSGID_COMPLETION_TYPES];
+};
+
+/*
+ * this structure contains the HWCTRL Core state.
+ */
+struct hwctrl_state {
+	struct vxd_states core_state;
+	struct hwctrl_msgstatus fwmsg_status;
+	struct hwctrl_msgstatus hostmsg_status;
+};
+
+int hwctrl_picture_submit_fragment(void *hndl_hwctx,
+				   struct dec_pict_fragment *pict_fragment,
+				   struct dec_decpict *decpict,
+				   void *vxd_dec_ctx);
+
+int hwctrl_process_msg(void *hndl_hwctx, unsigned int msg_flags, unsigned int *msg,
+		       struct dec_decpict **decpict);
+
+int hwctrl_getcore_cached_status(void *hndl_hwctx, struct hwctrl_state *state);
+
+int hwctrl_get_core_status(void *hndl_hwctx, struct hwctrl_state *state);
+
+int hwctrl_is_on_seq_replay(void *hndl_hwctx);
+
+int hwctrl_picture_submitbatch(void *hndl_hwctx, struct dec_decpict *decpict,
+			       void *vxd_dec_ctx);
+
+int hwctrl_getpicpend_pictlist(void *hndl_hwctx, unsigned int transaction_id,
+			       struct dec_decpict **decpict);
+
+int hwctrl_peekheadpiclist(void *hndl_hwctx, struct dec_decpict **decpict);
+
+int hwctrl_getdecodedpicture(void *hndl_hwctx, struct dec_decpict **decpict);
+
+void hwctrl_removefrom_piclist(void *hndl_hwctx, struct dec_decpict *decpict);
+
+int hwctrl_getregsoffset(void *hndl_hwctx,
+			 struct decoder_regsoffsets *regs_offsets);
+
+int hwctrl_initialise(void *dec_core, void *comp_int_userdata,
+		      const struct vdecdd_dd_devconfig *dd_devconfig,
+		      struct vxd_coreprops *core_props, void **hndl_hwctx);
+
+int hwctrl_deinitialise(void *hndl_hwctx);
+
+#endif /* _HW_CONTROL_H */
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 07/30] v4l: vxd-dec: Add vxd core module
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (5 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 06/30] v4l: vxd-dec: Add hardware control modules sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 08/30] v4l: vxd-dec: Add translation control modules sidraya.bj
                   ` (24 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This patch prepares the picture commands for the firmware;
it includes the reconstructed and alternate picture commands.

Signed-off-by: Amit Makani <amit.makani@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                 |    2 +
 drivers/staging/media/vxd/decoder/vxd_int.c | 1137 +++++++++++++++++++
 drivers/staging/media/vxd/decoder/vxd_int.h |  128 +++
 3 files changed, 1267 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_int.c
 create mode 100644 drivers/staging/media/vxd/decoder/vxd_int.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 2327ea12caa6..7b21ebfc61d4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19548,6 +19548,8 @@ F:	drivers/staging/media/vxd/decoder/img_dec_common.h
 F:	drivers/staging/media/vxd/decoder/vxd_core.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.h
+F:	drivers/staging/media/vxd/decoder/vxd_int.c
+F:	drivers/staging/media/vxd/decoder/vxd_int.h
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec.c
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec_priv.h
 F:	drivers/staging/media/vxd/decoder/vxd_pvdec_regs.h
diff --git a/drivers/staging/media/vxd/decoder/vxd_int.c b/drivers/staging/media/vxd/decoder/vxd_int.c
new file mode 100644
index 000000000000..c75aef6deed1
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_int.c
@@ -0,0 +1,1137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "fw_interface.h"
+#include "h264fw_data.h"
+#include "img_errors.h"
+#include "img_dec_common.h"
+#include "img_pvdec_core_regs.h"
+#include "img_pvdec_pixel_regs.h"
+#include "img_pvdec_test_regs.h"
+#include "img_vdec_fw_msg.h"
+#include "img_video_bus4_mmu_regs.h"
+#include "img_msvdx_core_regs.h"
+#include "img_msvdx_cmds.h"
+#include "reg_io2.h"
+#include "scaler_setup.h"
+#include "vdecdd_defs.h"
+#include "vdecdd_utils.h"
+#include "vdecfw_shared.h"
+#include "vdec_defs.h"
+#include "vxd_ext.h"
+#include "vxd_int.h"
+#include "vxd_props.h"
+
+#define MSVDX_CACHE_REF_OFFSET_V100     (72L)
+#define MSVDX_CACHE_ROW_OFFSET_V100     (4L)
+
+#define MSVDX_CACHE_REF_OFFSET_V550     (144L)
+#define MSVDX_CACHE_ROW_OFFSET_V550     (8L)
+
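+/* Extract the n-bit field of v that starts at bit lb. */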
+#define GET_BITS(v, lb, n)       (((v) >> (lb)) & ((1 << (n)) - 1))
+#define IS_PVDEC_PIPELINE(std)   ((std) == VDEC_STD_HEVC ? 1 : 0)
+
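+/* Hardware CODEC_MODE values indexed by enum vdec_vid_std. */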
+static int amsvdx_codecmode[VDEC_STD_MAX] = {
+	/* Invalid */
+	-1,
+	/* MPEG2 */
+	3,
+	/* MPEG4 */
+	4,
+	/* H263 */
+	4,
+	/* H264 */
+	1,
+	/* VC1 */
+	2,
+	/* AVS */
+	5,
+	/* RealVideo (8) */
+	8,
+	/* JPEG */
+	0,
+	/* On2 VP6 */
+	10,
+	/* On2 VP8 */
+	11,
+	/* Invalid */
+#ifdef HAS_VP9
+	/* On2 VP9 */
+	13,
+#endif
+	/* Sorenson */
+	4,
+	/* HEVC */
+	12,
+};
+
+struct msvdx_scaler_coeff_cmds {
+	unsigned int acmd_horizluma_coeff[VDECFW_NUM_SCALE_COEFFS];
+	unsigned int acmd_vertluma_coeff[VDECFW_NUM_SCALE_COEFFS];
+	unsigned int acmd_horizchroma_coeff[VDECFW_NUM_SCALE_COEFFS];
+	unsigned int acmd_vertchroma_coeff[VDECFW_NUM_SCALE_COEFFS];
+};
+
+static struct vxd_vidstd_props astd_props[] = {
+	{ VDEC_STD_MPEG2, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_MPEG4, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_H263, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_H264, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0x10000, 8,
+	  8, PIXEL_FORMAT_420 },
+	{ VDEC_STD_VC1, CORE_REVISION(7, 0, 0), 80, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_AVS, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_REAL, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_JPEG, CORE_REVISION(7, 0, 0), 64, 16, 32768, 32768, 0, 8, 8,
+	  PIXEL_FORMAT_444 },
+	{ VDEC_STD_VP6, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_VP8, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+	{ VDEC_STD_SORENSON, CORE_REVISION(7, 0, 0), 64, 16, 4096, 4096, 0, 8,
+	  8, PIXEL_FORMAT_420 },
+	{ VDEC_STD_HEVC, CORE_REVISION(7, 0, 0), 64, 16, 8192, 8192, 0, 8, 8,
+	  PIXEL_FORMAT_420 },
+};
+
+enum vdec_msvdx_async_mode {
+	VDEC_MSVDX_ASYNC_NORMAL,
+	VDEC_MSVDX_ASYNC_VDMC,
+	VDEC_MSVDX_ASYNC_VDEB,
+	VDEC_MSVDX_ASYNC_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* MSVDX row strides for video buffers. */
+static const unsigned int amsvdx_64byte_row_stride[] = {
+	384, 768, 1280, 1920, 512, 1024, 2048, 4096
+};
+
+/* MSVDX row strides for jpeg buffers. */
+static const unsigned int amsvdx_jpeg_row_stride[] = {
+	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576, 32768
+};
+
+/* VXD Core major revision. */
+static unsigned int maj_rev;
+/* VXD Core minor revision. */
+static unsigned int min_rev;
+/* VXD Core maintenance revision. */
+static unsigned int maint_rev;
+
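+/*
+ * Map a row stride in bytes to the hardware stride code (the index into
+ * the stride tables above); returns -1 for an unsupported stride.
+ */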
+static int get_stride_code(enum vdec_vid_std vidstd, unsigned int row_stride)
+{
+	unsigned int i;
+
+	if (vidstd == VDEC_STD_JPEG) {
+		for (i = 0; i < ARRAY_SIZE(amsvdx_jpeg_row_stride); i++) {
+			if (amsvdx_jpeg_row_stride[i] == row_stride)
+				return i;
+		}
+	} else {
+		for (i = 0; i < ARRAY_SIZE(amsvdx_64byte_row_stride); i++) {
+			if (amsvdx_64byte_row_stride[i] == row_stride)
+				return i;
+		}
+	}
+
+	return -1;
+}
+
+/* Obtains the hardware defined video profile. */
+static unsigned int vxd_getprofile(enum vdec_vid_std vidstd, unsigned int std_profile)
+{
+	unsigned int profile = 0;
+
+	switch (vidstd) {
+	case VDEC_STD_H264:
+		switch (std_profile) {
+		case H264_PROFILE_BASELINE:
+			profile = 0;
+			break;
+
+		/*
+		 * Extended may be attempted as Baseline or
+		 * Main depending on the constraint_set_flags
+		 */
+		case H264_PROFILE_EXTENDED:
+		case H264_PROFILE_MAIN:
+			profile = 1;
+			break;
+
+		case H264_PROFILE_HIGH:
+		case H264_PROFILE_HIGH444:
+		case H264_PROFILE_HIGH422:
+		case H264_PROFILE_HIGH10:
+		case H264_PROFILE_CAVLC444:
+		case H264_PROFILE_MVC_HIGH:
+		case H264_PROFILE_MVC_STEREO:
+			profile = 2;
+			break;
+		default:
+			profile = 2;
+			break;
+		}
+		break;
+
+	default:
+		profile = 0;
+		break;
+	}
+
+	return profile;
+}
+
+static int vxd_getcoreproperties(struct vxd_coreprops *coreprops,
+				 unsigned int corerev,
+				 unsigned int pvdec_coreid, unsigned int mmu_config0,
+				 unsigned int mmu_config1, unsigned int *pixel_pipecfg,
+				 unsigned int *pixel_misccfg, unsigned int max_framecfg)
+{
+	unsigned int group_id;
+	unsigned int core_id;
+	unsigned int core_config;
+	unsigned int extended_address_range;
+	unsigned char group_size = 0;
+	unsigned char pipe_minus1 = 0;
+	unsigned int max_h264_hw_chromaformat = 0;
+	unsigned int max_hevc_hw_chromaformat = 0;
+	unsigned int max_bitdepth_luma = 0;
+	unsigned int i;
+
+	struct pvdec_core_rev core_rev;
+
+	if (!coreprops || !pixel_pipecfg || !pixel_misccfg)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* PVDEC Core Revision Information */
+	core_rev.maj_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+					    CR_PVDEC_MAJOR_REV);
+	core_rev.min_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+					    CR_PVDEC_MINOR_REV);
+	core_rev.maint_rev = REGIO_READ_FIELD(corerev, PVDEC_CORE, CR_PVDEC_CORE_REV,
+					      CR_PVDEC_MAINT_REV);
+
+	/* core id */
+	group_id = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE, CR_PVDEC_CORE_ID, CR_GROUP_ID);
+	core_id = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE, CR_PVDEC_CORE_ID, CR_CORE_ID);
+
+	/* Ensure that the core is IMG Video Decoder (PVDEC). */
+	if (group_id != 3 || core_id != 3)
+		return IMG_ERROR_DEVICE_NOT_FOUND;
+
+	core_config = REGIO_READ_FIELD(pvdec_coreid, PVDEC_CORE,
+				       CR_PVDEC_CORE_ID, CR_PVDEC_CORE_CONFIG);
+
+	memset(coreprops, 0, sizeof(*(coreprops)));
+
+	/*  Construct core version name. */
+	snprintf(coreprops->aversion, VER_STR_LEN, "%d.%d.%d",
+		 core_rev.maj_rev, core_rev.min_rev, core_rev.maint_rev);
+
+	coreprops->mmu_support_stride_per_context =
+			REGIO_READ_FIELD(mmu_config1, IMG_VIDEO_BUS4_MMU,
+					 MMU_CONFIG1,
+					 SUPPORT_STRIDE_PER_CONTEXT) == 1 ? 1 : 0;
+
+	coreprops->mmu_support_secure = REGIO_READ_FIELD(mmu_config1, IMG_VIDEO_BUS4_MMU,
+							 MMU_CONFIG1, SUPPORT_SECURE) == 1 ? 1 : 0;
+
+	extended_address_range = REGIO_READ_FIELD(mmu_config0, IMG_VIDEO_BUS4_MMU,
+						  MMU_CONFIG0, EXTENDED_ADDR_RANGE);
+
+	switch (extended_address_range) {
+	case 0:
+		coreprops->mmu_type = MMU_TYPE_32BIT;
+		break;
+	case 4:
+		coreprops->mmu_type = MMU_TYPE_36BIT;
+		break;
+	case 8:
+		coreprops->mmu_type = MMU_TYPE_40BIT;
+		break;
+	default:
+		return IMG_ERROR_NOT_SUPPORTED;
+	}
+
+	group_size += REGIO_READ_FIELD(mmu_config0, IMG_VIDEO_BUS4_MMU,
+			MMU_CONFIG0, GROUP_OVERRIDE_SIZE);
+
+	coreprops->num_entropy_pipes = core_config & 0xF;
+	coreprops->num_pixel_pipes = (core_config >> 4) & 0xF;
+#ifdef	DEBUG_DECODER_DRIVER
+	pr_info("PVDEC revision %08x detected, id %08x.\n", corerev, core_id);
+	pr_info("Found %d entropy pipe(s), %d pixel pipe(s), %d group size",
+		coreprops->num_entropy_pipes, coreprops->num_pixel_pipes,
+		group_size);
+#endif
+
+	/* Set global rev info variables used by macros */
+	maj_rev = core_rev.maj_rev;
+	min_rev = core_rev.min_rev;
+	maint_rev = core_rev.maint_rev;
+
+	/* Default settings */
+	for (i = 0; i < ARRAY_SIZE(astd_props); i++) {
+		struct vxd_vidstd_props *pvidstd_props =
+			&coreprops->vidstd_props[astd_props[i].vidstd];
+		/*
+		 * Update the video standard properties if the core is at or
+		 * beyond the specified revision and these properties are
+		 * newer than any applied so far.
+		 */
+		if (FROM_REV(MAJOR_REVISION((int)astd_props[i].core_rev),
+			     MINOR_REVISION((int)astd_props[i].core_rev),
+			     MAINT_REVISION((int)astd_props[i].core_rev), int) &&
+		    astd_props[i].core_rev >= pvidstd_props->core_rev) {
+			*pvidstd_props = astd_props[i];
+
+			if (pvidstd_props->vidstd != VDEC_STD_JPEG &&
+			    (FROM_REV(8, 0, 0, int)) && (pvidstd_props->vidstd ==
+			    VDEC_STD_HEVC ? 1 : 0)) {
+				/*
+				 * override default values with values
+				 * specified in HW (register does not
+				 * exist in previous cores)
+				 */
+				pvidstd_props->max_width =
+					2 << REGIO_READ_FIELD(max_framecfg,
+						PVDEC_PIXEL,
+						CR_MAX_FRAME_CONFIG,
+						CR_PVDEC_HOR_MSB);
+
+				pvidstd_props->max_height =
+					2 << REGIO_READ_FIELD(max_framecfg,
+						PVDEC_PIXEL,
+						CR_MAX_FRAME_CONFIG,
+						CR_PVDEC_VER_MSB);
+			} else if (pvidstd_props->vidstd != VDEC_STD_JPEG &&
+				(FROM_REV(8, 0, 0, int))) {
+				pvidstd_props->max_width =
+					2 << REGIO_READ_FIELD(max_framecfg,
+						PVDEC_PIXEL,
+						CR_MAX_FRAME_CONFIG,
+						CR_MSVDX_HOR_MSB);
+
+				pvidstd_props->max_height =
+					2 << REGIO_READ_FIELD(max_framecfg,
+						PVDEC_PIXEL,
+						CR_MAX_FRAME_CONFIG,
+						CR_MSVDX_VER_MSB);
+			}
+		}
+	}
+
+	/* Populate the core properties. */
+	if (GET_BITS(core_config, 11, 1))
+		coreprops->hd_support = 1;
+
+	for (pipe_minus1 = 0; pipe_minus1 < coreprops->num_pixel_pipes;
+		pipe_minus1++) {
+		unsigned int current_bitdepth =
+			GET_BITS(pixel_misccfg[pipe_minus1], 4, 3) + 8;
+		unsigned int current_h264_hw_chromaformat =
+			GET_BITS(pixel_misccfg[pipe_minus1], 0, 2);
+		unsigned int current_hevc_hw_chromaformat =
+			GET_BITS(pixel_misccfg[pipe_minus1], 2, 2);
+#ifdef DEBUG_DECODER_DRIVER
+		pr_info("cur_bitdepth: %d  cur_h264_hw_chromaformat: %d",
+			current_bitdepth, current_h264_hw_chromaformat);
+		pr_info("cur_hevc_hw_chromaformat: %d  pipe_minus1: %d\n",
+			current_hevc_hw_chromaformat, pipe_minus1);
+#endif
+
+		if (GET_BITS(pixel_misccfg[pipe_minus1], 8, 1))
+			coreprops->rotation_support[pipe_minus1] = 1;
+
+		if (GET_BITS(pixel_misccfg[pipe_minus1], 9, 1))
+			coreprops->scaling_support[pipe_minus1] = 1;
+
+		coreprops->num_streams[pipe_minus1] =
+			GET_BITS(pixel_misccfg[pipe_minus1], 12, 2) + 1;
+
+		/* Video standards. */
+		coreprops->mpeg2[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 0, 1) ? 1 : 0;
+		coreprops->mpeg4[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 1, 1) ? 1 : 0;
+		coreprops->h264[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 2, 1) ? 1 : 0;
+		coreprops->vc1[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 3, 1) ? 1 : 0;
+		coreprops->jpeg[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 5, 1) ? 1 : 0;
+		coreprops->avs[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 7, 1) ? 1 : 0;
+		coreprops->real[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 8, 1) ? 1 : 0;
+		coreprops->vp6[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 9, 1) ? 1 : 0;
+		coreprops->vp8[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 10, 1) ? 1 : 0;
+		coreprops->hevc[pipe_minus1] =
+			GET_BITS(pixel_pipecfg[pipe_minus1], 22, 1) ? 1 : 0;
+
+		max_bitdepth_luma = (max_bitdepth_luma > current_bitdepth ?
+			max_bitdepth_luma : current_bitdepth);
+		max_h264_hw_chromaformat = (max_h264_hw_chromaformat >
+			current_h264_hw_chromaformat ? max_h264_hw_chromaformat
+			: current_h264_hw_chromaformat);
+		max_hevc_hw_chromaformat = (max_hevc_hw_chromaformat >
+			current_hevc_hw_chromaformat ? max_hevc_hw_chromaformat
+			: current_hevc_hw_chromaformat);
+	}
+
+	/* Override default bit-depth with value signalled explicitly by core. */
+	coreprops->vidstd_props[0].max_luma_bitdepth = max_bitdepth_luma;
+	coreprops->vidstd_props[0].max_chroma_bitdepth =
+		coreprops->vidstd_props[0].max_luma_bitdepth;
+
+	for (i = 1; i < VDEC_STD_MAX; i++) {
+		coreprops->vidstd_props[i].max_luma_bitdepth =
+			coreprops->vidstd_props[0].max_luma_bitdepth;
+		coreprops->vidstd_props[i].max_chroma_bitdepth =
+			coreprops->vidstd_props[0].max_chroma_bitdepth;
+	}
+
+	switch (max_h264_hw_chromaformat) {
+	case 1:
+		coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+			PIXEL_FORMAT_420;
+		break;
+
+	case 2:
+		coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+			PIXEL_FORMAT_422;
+		break;
+
+	case 3:
+		coreprops->vidstd_props[VDEC_STD_H264].max_chroma_format =
+			PIXEL_FORMAT_444;
+		break;
+
+	default:
+		break;
+	}
+
+	switch (max_hevc_hw_chromaformat) {
+	case 1:
+		coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+			PIXEL_FORMAT_420;
+		break;
+
+	case 2:
+		coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+			PIXEL_FORMAT_422;
+		break;
+
+	case 3:
+		coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format =
+			PIXEL_FORMAT_444;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static unsigned char vxd_is_supported_byatleast_onepipe(const unsigned char *features,
+							unsigned int num_pipes)
+{
+	unsigned int i;
+
+	VDEC_ASSERT(features);
+	VDEC_ASSERT(num_pipes <= VDEC_MAX_PIXEL_PIPES);
+
+	for (i = 0; i < num_pipes; i++) {
+		if (features[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+void vxd_set_reconpictcmds(const struct vdecdd_str_unit *str_unit,
+			   const struct vdec_str_configdata *str_configdata,
+			   const struct vdec_str_opconfig *output_config,
+			   const struct vxd_coreprops *coreprops,
+			   const struct vxd_buffers *buffers,
+			   unsigned int *pict_cmds)
+{
+	struct pixel_pixinfo  *pixel_info;
+	unsigned int row_stride_code;
+	unsigned char benable_auxline_buf = 1;
+
+	unsigned int coded_height;
+	unsigned int coded_width;
+	unsigned int disp_height;
+	unsigned int disp_width;
+	unsigned int profile;
+	unsigned char plane;
+	unsigned int y_stride;
+	unsigned int uv_stride;
+	unsigned int v_stride;
+	unsigned int cache_ref_offset;
+	unsigned int cache_row_offset;
+
+	if (str_configdata->vid_std == VDEC_STD_JPEG) {
+		disp_height = 0;
+		disp_width = 0;
+		coded_height = 0;
+		coded_width = 0;
+	} else {
+		coded_height = ALIGN(str_unit->pict_hdr_info->coded_frame_size.height,
+				     (str_unit->pict_hdr_info->field) ?
+				     2 * VDEC_MB_DIMENSION : VDEC_MB_DIMENSION);
+		/* Hardware field is coded size - 1 */
+		coded_height -= 1;
+
+		coded_width = ALIGN(str_unit->pict_hdr_info->coded_frame_size.width,
+				    VDEC_MB_DIMENSION);
+		/*  Hardware field is coded size - 1 */
+		coded_width -= 1;
+
+		disp_height = str_unit->pict_hdr_info->disp_info.enc_disp_region.height
+			+ str_unit->pict_hdr_info->disp_info.enc_disp_region.top_offset - 1;
+		disp_width = str_unit->pict_hdr_info->disp_info.enc_disp_region.width +
+			str_unit->pict_hdr_info->disp_info.enc_disp_region.left_offset - 1;
+	}
+	/*
+	 * Display picture size (DISPLAY_PICTURE)
+	 * The value written is not the actual video size to be
+	 * displayed but a number that has to differ from the coded
+	 * pixel size by less than one macroblock
+	 * (coded_size - display_size <= 0x0F). Because H264 can have
+	 * a different display size, we need to check and write the
+	 * coded_size again in the display_size register if this
+	 * condition is not fulfilled.
+	 */
+	if (str_configdata->vid_std != VDEC_STD_VC1 && ((coded_height - disp_height) > 0x0F)) {
+		REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+				       MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+				       DISPLAY_PICTURE_HEIGHT,
+				       coded_height, unsigned int);
+	} else {
+		REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+				       MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+				       DISPLAY_PICTURE_HEIGHT,
+				       disp_height, unsigned int);
+	}
+
+	if (((coded_width - disp_width) > 0x0F)) {
+		REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+				       MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+				       DISPLAY_PICTURE_WIDTH,
+				       coded_width, unsigned int);
+	} else {
+		REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_DISPLAY_PICTURE],
+				       MSVDX_CMDS, DISPLAY_PICTURE_SIZE,
+				       DISPLAY_PICTURE_WIDTH,
+				       disp_width, unsigned int);
+	}
+
+	REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_CODED_PICTURE],
+			       MSVDX_CMDS, CODED_PICTURE_SIZE,
+			       CODED_PICTURE_HEIGHT,
+			       coded_height, unsigned int);
+	REGIO_WRITE_FIELD_LITE(pict_cmds[VDECFW_CMD_CODED_PICTURE],
+			       MSVDX_CMDS, CODED_PICTURE_SIZE,
+			       CODED_PICTURE_WIDTH,
+			       coded_width, unsigned int);
+
+	/*
+	 * For standards where dpb_diff != 1 and chroma format != 420
+	 * cache_ref_offset has to be calculated in the F/W.
+	 */
+	if (str_configdata->vid_std != VDEC_STD_HEVC && str_configdata->vid_std != VDEC_STD_H264) {
+		unsigned int log2_size, cache_size, luma_size;
+		unsigned char is_hevc_supported, is_hevc444_supported = 0;
+
+		is_hevc_supported =
+			vxd_is_supported_byatleast_onepipe(coreprops->hevc,
+							   coreprops->num_pixel_pipes);
+
+		if (is_hevc_supported) {
+			is_hevc444_supported =
+				coreprops->vidstd_props[VDEC_STD_HEVC].max_chroma_format ==
+				PIXEL_FORMAT_444 ? 1 : 0;
+		}
+
+		log2_size = 9 + (is_hevc_supported ? 1 : 0) + (is_hevc444_supported ? 1 : 0);
+		cache_size = 3 << log2_size;
+		luma_size = (cache_size * 2) / 3;
+		cache_ref_offset = (luma_size * 15) / 32;
+		cache_ref_offset = (cache_ref_offset + 7) & (~7);
+		cache_row_offset = 0x0C;
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+				  MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+				  CONFIG_REF_CHROMA_ADJUST, 1,
+				  unsigned int, unsigned int);
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+				  MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+				  CONFIG_REF_OFFSET, cache_ref_offset,
+				  unsigned int, unsigned int);
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_MC_CACHE_CONFIGURATION],
+				  MSVDX_CMDS, MC_CACHE_CONFIGURATION,
+				  CONFIG_ROW_OFFSET, cache_row_offset,
+				  unsigned int, unsigned int);
+	}
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+			  MSVDX_CMDS, OPERATING_MODE, CODEC_MODE,
+			  amsvdx_codecmode[str_configdata->vid_std],
+			  unsigned int, unsigned int);
+
+	profile = str_unit->seq_hdr_info->com_sequ_hdr_info.codec_profile;
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+			  MSVDX_CMDS, OPERATING_MODE, CODEC_PROFILE,
+			  vxd_getprofile(str_configdata->vid_std, profile),
+			  unsigned int, unsigned int);
+
+	plane = str_unit->seq_hdr_info->com_sequ_hdr_info.separate_chroma_planes;
+	pixel_info = &str_unit->seq_hdr_info->com_sequ_hdr_info.pixel_info;
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+			  MSVDX_CMDS, OPERATING_MODE, CHROMA_FORMAT, plane ?
+			  0 : pixel_info->chroma_fmt, unsigned int, int);
+
+	if (str_configdata->vid_std != VDEC_STD_JPEG) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+				  MSVDX_CMDS, EXT_OP_MODE, CHROMA_FORMAT_IDC, plane ?
+				  0 : pixel_get_hw_chroma_format_idc
+							(pixel_info->chroma_fmt_idc),
+				  unsigned int, int);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+				  MSVDX_CMDS, EXT_OP_MODE, MEMORY_PACKING,
+				  output_config->pixel_info.mem_pkg ==
+				  PIXEL_BIT10_MP ? 1 : 0, unsigned int, int);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+				  MSVDX_CMDS, EXT_OP_MODE, BIT_DEPTH_LUMA_MINUS8,
+				  pixel_info->bitdepth_y - 8,
+				  unsigned int, unsigned int);
+
+		if (pixel_info->chroma_fmt_idc == PIXEL_FORMAT_MONO) {
+			/*
+			 * For monochrome streams use the same bit depth for
+			 * chroma and luma.
+			 */
+			REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE],
+					  MSVDX_CMDS, EXT_OP_MODE,
+					  BIT_DEPTH_CHROMA_MINUS8,
+					  pixel_info->bitdepth_y - 8,
+					  unsigned int, unsigned int);
+		} else {
+			/*
+			 * For normal streams use the appropriate bit depth for chroma.
+			 */
+			REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXT_OP_MODE], MSVDX_CMDS,
+					  EXT_OP_MODE, BIT_DEPTH_CHROMA_MINUS8,
+					  pixel_info->bitdepth_c - 8,
+					  unsigned int, unsigned int);
+		}
+	} else {
+		pict_cmds[VDECFW_CMD_EXT_OP_MODE] = 0;
+	}
+
+	if (str_configdata->vid_std != VDEC_STD_JPEG) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS,
+				  OPERATING_MODE, CHROMA_INTERLEAVED,
+				  PIXEL_GET_HW_CHROMA_INTERLEAVED
+				  (output_config->pixel_info.chroma_interleave),
+				  unsigned int, int);
+	}
+
+	if (str_configdata->vid_std == VDEC_STD_JPEG) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+				  MSVDX_CMDS, OPERATING_MODE, ASYNC_MODE,
+				  VDEC_MSVDX_ASYNC_VDMC,
+				  unsigned int, unsigned int);
+	}
+
+	if (str_configdata->vid_std == VDEC_STD_H264) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS,
+				  OPERATING_MODE, ASYNC_MODE,
+				  str_unit->pict_hdr_info->discontinuous_mbs ?
+				  VDEC_MSVDX_ASYNC_VDMC : VDEC_MSVDX_ASYNC_NORMAL,
+				  unsigned int, int);
+	}
+
+	y_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_Y].stride;
+	uv_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_UV].stride;
+	v_stride = buffers->recon_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_V].stride;
+
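+	/*
+	 * When every plane stride is a multiple of the extended-stride
+	 * alignment, program strides directly in 64-byte units; otherwise
+	 * fall back to the legacy stride-code tables.
+	 */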
+	if (((y_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+	    ((uv_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+	    ((v_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0)) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+				  MSVDX_CMDS, OPERATING_MODE,
+				  USE_EXT_ROW_STRIDE, 1, unsigned int, int);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_EXTENDED_ROW_STRIDE],
+				  MSVDX_CMDS, EXTENDED_ROW_STRIDE,
+				  EXT_ROW_STRIDE, y_stride >> 6, unsigned int, unsigned int);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE],
+				  MSVDX_CMDS, CHROMA_ROW_STRIDE,
+				  CHROMA_ROW_STRIDE, uv_stride >> 6, unsigned int, unsigned int);
+	} else {
+		row_stride_code = get_stride_code(str_configdata->vid_std, y_stride);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+				  MSVDX_CMDS, OPERATING_MODE, ROW_STRIDE,
+				  row_stride_code & 0x7, unsigned int, unsigned int);
+
+		if (str_configdata->vid_std == VDEC_STD_JPEG) {
+			/*
+			 * Use the unused chroma interleaved flag
+			 * to hold MSB of row stride code
+			 */
+			IMG_ASSERT(row_stride_code < 16);
+			REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_OPERATING_MODE],
+					  MSVDX_CMDS, OPERATING_MODE,
+					  CHROMA_INTERLEAVED,
+					  row_stride_code >> 3, unsigned int, unsigned int);
+		} else {
+			IMG_ASSERT(row_stride_code < 8);
+		}
+	}
+	pict_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+		buffers->recon_pict->rend_info.plane_info[0].offset;
+
+	pict_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+		buffers->recon_pict->rend_info.plane_info[1].offset;
+
+	pict_cmds[VDECFW_CMD_CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->recon_pict->pict_buf->ddbuf_info) +
+		buffers->recon_pict->rend_info.plane_info[2].offset;
+
+	pict_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS] = 0;
+	pict_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS] = 0;
+
+#ifdef ERROR_CONCEALMENT
+	/* update error concealment frame info if available */
+	if (buffers->err_pict_bufinfo) {
+		pict_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS] =
+			(unsigned int)GET_HOST_ADDR(buffers->err_pict_bufinfo) +
+			buffers->recon_pict->rend_info.plane_info[0].offset;
+
+		pict_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS] =
+			(unsigned int)GET_HOST_ADDR(buffers->err_pict_bufinfo) +
+			buffers->recon_pict->rend_info.plane_info[1].offset;
+	}
+#endif
+
+	pict_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(buffers->intra_bufinfo);
+	pict_cmds[VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE] =
+		buffers->intra_bufsize_per_pipe / 3;
+	pict_cmds[VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE] =
+		buffers->intra_bufsize_per_pipe;
+	pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(buffers->auxline_bufinfo);
+	pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE] =
+		buffers->auxline_bufsize_per_pipe;
+
+	/*
+	 * for pvdec we need to set these registers even if we don't
+	 * use alternative output
+	 */
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_CHROMA_MINUS8,
+			  output_config->pixel_info.bitdepth_c - 8, unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_LUMA_MINUS8,
+			  output_config->pixel_info.bitdepth_y - 8, unsigned int, unsigned int);
+
+	/*
+	 * this is causing corruption in RV40 and VC1 streams with
+	 * scaling/rotation enabled on Coral, so setting to 0
+	 */
+	benable_auxline_buf = benable_auxline_buf &&
+		(str_configdata->vid_std != VDEC_STD_REAL) &&
+		(str_configdata->vid_std != VDEC_STD_VC1);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+			  USE_AUX_LINE_BUF, benable_auxline_buf ? 1 : 0, unsigned int, int);
+}
+
+void vxd_set_altpictcmds(const struct vdecdd_str_unit *str_unit,
+			 const struct vdec_str_configdata *str_configdata,
+			 const struct vdec_str_opconfig *output_config,
+			 const struct vxd_coreprops *coreprops,
+			 const struct vxd_buffers *buffers,
+			 unsigned int *pict_cmds)
+{
+	unsigned int row_stride_code;
+	unsigned int y_stride;
+	unsigned int uv_stride;
+	unsigned int v_stride;
+
+	y_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_Y].stride;
+	uv_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_UV].stride;
+	v_stride = buffers->alt_pict->rend_info.plane_info[VDEC_PLANE_VIDEO_V].stride;
+
+	if (((y_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+	    ((uv_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0) &&
+	    ((v_stride % (VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT)) == 0)) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+				  MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+				  USE_EXT_ROT_ROW_STRIDE, 1, unsigned int, int);
+
+		/* 64-byte (min) aligned luma stride value. */
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+				  MSVDX_CMDS,
+				  ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+				  EXT_ROT_ROW_STRIDE, y_stride >> 6,
+				  unsigned int, unsigned int);
+
+		/* 64-byte (min) aligned chroma stride value. */
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE],
+				  MSVDX_CMDS, CHROMA_ROW_STRIDE,
+				  ALT_CHROMA_ROW_STRIDE, uv_stride >> 6,
+				  unsigned int, unsigned int);
+	} else {
+		/*
+		 * Obtain the code for buffer stride
+		 * (must be less than 8, i.e. not JPEG strides)
+		 */
+		row_stride_code =
+			get_stride_code(str_configdata->vid_std, y_stride);
+
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+				  MSVDX_CMDS,
+				  ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+				  ROTATION_ROW_STRIDE, row_stride_code & 0x7,
+				  unsigned int, unsigned int);
+	}
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+			  SCALE_INPUT_SIZE_SEL,
+			  ((output_config->pixel_info.chroma_fmt_idc !=
+			  str_unit->seq_hdr_info->com_sequ_hdr_info.pixel_info.chroma_fmt_idc)) ?
+			  1 : 0, unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_PICTURE_ROTATION,
+			  PACKED_422_OUTPUT,
+			  (output_config->pixel_info.chroma_fmt_idc ==
+			  PIXEL_FORMAT_422 &&
+			  output_config->pixel_info.num_planes == 1) ? 1 : 0,
+			  unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_OUTPUT_FORMAT,
+			  str_unit->seq_hdr_info->com_sequ_hdr_info.separate_chroma_planes ?
+			  0 : pixel_get_hw_chroma_format_idc
+					(output_config->pixel_info.chroma_fmt_idc),
+			  unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_CHROMA_MINUS8,
+			  output_config->pixel_info.bitdepth_c - 8,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_LUMA_MINUS8,
+			  output_config->pixel_info.bitdepth_y - 8,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_MEMORY_PACKING,
+			  (output_config->pixel_info.mem_pkg ==
+			  PIXEL_BIT10_MP) ? 1 : 0, unsigned int, int);
+
+	pict_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+		buffers->alt_pict->rend_info.plane_info[0].offset;
+
+	pict_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+		buffers->alt_pict->rend_info.plane_info[1].offset;
+
+	pict_cmds[VDECFW_CMD_CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS] =
+		(unsigned int)GET_HOST_ADDR(&buffers->alt_pict->pict_buf->ddbuf_info) +
+		buffers->alt_pict->rend_info.plane_info[2].offset;
+}
+
+int vxd_getscalercmds(const struct scaler_config *scaler_config,
+		      const struct scaler_pitch *pitch,
+		      const struct scaler_filter *filter,
+		      const struct pixel_pixinfo *out_loop_pixel_info,
+		      struct scaler_params *params,
+		      unsigned int *pict_cmds)
+{
+	const struct vxd_coreprops *coreprops = scaler_config->coreprops;
+	/*
+	 * Indirectly detect decoder core type (if HEVC is supported, it has
+	 * to be a PVDEC core) and decide whether to force luma re-sampling.
+	 */
+	unsigned char bforce_luma_resampling = coreprops->hevc[0];
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_OUTPUT_FORMAT,
+			  scaler_config->bseparate_chroma_planes ? 0 :
+			  pixel_get_hw_chroma_format_idc(out_loop_pixel_info->chroma_fmt_idc),
+			  unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  SCALE_CHROMA_RESAMP_ONLY, bforce_luma_resampling ? 0 :
+			  (pitch->horiz_luma == FIXED(1, HIGHP)) &&
+			  (pitch->vert_luma == FIXED(1, HIGHP)), unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL, ALT_MEMORY_PACKING,
+			  pixel_get_hw_memory_packing(out_loop_pixel_info->mem_pkg),
+			  unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_LUMA_MINUS8,
+			  out_loop_pixel_info->bitdepth_y - 8,
+			  unsigned int, unsigned int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  ALT_BIT_DEPTH_CHROMA_MINUS8,
+			  out_loop_pixel_info->bitdepth_c - 8,
+			  unsigned int, unsigned int);
+
+	/* Scale luma bifilter is always 0 for now */
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  SCALE_LUMA_BIFILTER_HORIZ,
+			  0, unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  SCALE_LUMA_BIFILTER_VERT,
+			  0, unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  SCALE_CHROMA_BIFILTER_HORIZ,
+			  filter->bhoriz_bilinear ? 1 : 0,
+			  unsigned int, int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL],
+			  MSVDX_CMDS, ALTERNATIVE_OUTPUT_CONTROL,
+			  SCALE_CHROMA_BIFILTER_VERT,
+			  filter->bvert_bilinear ? 1 : 0, unsigned int, int);
+
+	/* for cores 7.x.x and above, precision is 3.13 fixed point */
+	params->fixed_point_shift = 13;
+
+	/* Calculate the fixed-point versions for use by the hardware. */
+	params->vert_pitch = (int)((pitch->vert_luma +
+		(1 << (HIGHP - params->fixed_point_shift - 1))) >>
+		(HIGHP - params->fixed_point_shift));
+	params->vert_startpos = params->vert_pitch >> 1;
+	params->vert_pitch_chroma = (int)((pitch->vert_chroma +
+		(1 << (HIGHP - params->fixed_point_shift - 1))) >>
+		(HIGHP - params->fixed_point_shift));
+	params->vert_startpos_chroma = params->vert_pitch_chroma >> 1;
+	params->horz_pitch = (int)(pitch->horiz_luma >>
+		(HIGHP - params->fixed_point_shift));
+	params->horz_startpos = params->horz_pitch >> 1;
+	params->horz_pitch_chroma = (int)(pitch->horiz_chroma >>
+		(HIGHP - params->fixed_point_shift));
+	params->horz_startpos_chroma = params->horz_pitch_chroma >> 1;
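+	/*
+	 * Worked example, assuming HIGHP == 16 (illustrative only, the
+	 * actual value is defined elsewhere): a vertical luma pitch of 1.5
+	 * is 98304 in HIGHP fixed point, so the rounded shift above gives
+	 * (98304 + 4) >> 3 = 12288, i.e. 1.5 in 3.13 fixed point.
+	 */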
+
+#ifdef HAS_HEVC
+	if (scaler_config->vidstd == VDEC_STD_HEVC) {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+				  MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE,
+				  PVDEC_SCALE_DISPLAY_WIDTH,
+				  scaler_config->recon_width - 1,
+				  unsigned int, unsigned int);
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+				  MSVDX_CMDS, PVDEC_SCALED_DISPLAY_SIZE,
+				  PVDEC_SCALE_DISPLAY_HEIGHT,
+				  scaler_config->recon_height - 1,
+				  unsigned int, unsigned int);
+	} else {
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+				  MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+				  SCALE_DISPLAY_WIDTH,
+				  scaler_config->recon_width - 1,
+				  unsigned int, unsigned int);
+		REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+				  MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+				  SCALE_DISPLAY_HEIGHT,
+				  scaler_config->recon_height - 1,
+				  unsigned int, unsigned int);
+	}
+#else
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+			  MSVDX_CMDS, SCALED_DISPLAY_SIZE,
+			  SCALE_DISPLAY_WIDTH,
+			  scaler_config->recon_width - 1,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE],
+			  MSVDX_CMDS, SCALED_DISPLAY_SIZE, SCALE_DISPLAY_HEIGHT,
+			  scaler_config->recon_height - 1,
+			  unsigned int, unsigned int);
+#endif
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE],
+			  MSVDX_CMDS, SCALE_OUTPUT_SIZE,
+			  SCALE_OUTPUT_WIDTH_MIN1,
+			  scaler_config->scale_width - 1,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE],
+			  MSVDX_CMDS, SCALE_OUTPUT_SIZE,
+			  SCALE_OUTPUT_HEIGHT_MIN1,
+			  scaler_config->scale_height - 1,
+			  unsigned int, unsigned int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL],
+			  MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL,
+			  HORIZONTAL_SCALE_PITCH, params->horz_pitch,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL],
+			  MSVDX_CMDS, HORIZONTAL_SCALE_CONTROL,
+			  HORIZONTAL_INITIAL_POS, params->horz_startpos,
+			  unsigned int, unsigned int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA],
+			  MSVDX_CMDS, SCALE_HORIZONTAL_CHROMA,
+			  CHROMA_HORIZONTAL_PITCH, params->horz_pitch_chroma,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA],
+			  MSVDX_CMDS, SCALE_HORIZONTAL_CHROMA,
+			  CHROMA_HORIZONTAL_INITIAL,
+			  params->horz_startpos_chroma,
+			  unsigned int, unsigned int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL],
+			  MSVDX_CMDS, VERTICAL_SCALE_CONTROL,
+			  VERTICAL_SCALE_PITCH, params->vert_pitch,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL],
+			  MSVDX_CMDS, VERTICAL_SCALE_CONTROL,
+			  VERTICAL_INITIAL_POS, params->vert_startpos,
+			  unsigned int, unsigned int);
+
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA],
+			  MSVDX_CMDS, SCALE_VERTICAL_CHROMA,
+			  CHROMA_VERTICAL_PITCH, params->vert_pitch_chroma,
+			  unsigned int, unsigned int);
+	REGIO_WRITE_FIELD(pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA],
+			  MSVDX_CMDS, SCALE_VERTICAL_CHROMA,
+			  CHROMA_VERTICAL_INITIAL,
+			  params->vert_startpos_chroma,
+			  unsigned int, unsigned int);
+	return 0;
+}
+
+unsigned int vxd_get_codedpicsize(unsigned short width_min1, unsigned short height_min1)
+{
+	unsigned int reg = 0;
+
+	REGIO_WRITE_FIELD_LITE(reg, MSVDX_CMDS, CODED_PICTURE_SIZE,
+			       CODED_PICTURE_WIDTH, width_min1,
+			       unsigned short);
+	REGIO_WRITE_FIELD_LITE(reg, MSVDX_CMDS, CODED_PICTURE_SIZE,
+			       CODED_PICTURE_HEIGHT, height_min1,
+			       unsigned short);
+
+	return reg;
+}
+
+unsigned char vxd_get_codedmode(enum vdec_vid_std vidstd)
+{
+	return (unsigned char)amsvdx_codecmode[vidstd];
+}
+
+void vxd_get_coreproperties(void *hndl_coreproperties,
+			    struct vxd_coreprops *vxd_coreprops)
+{
+	struct vxd_core_props *props =
+		(struct vxd_core_props *)hndl_coreproperties;
+
+	vxd_getcoreproperties(vxd_coreprops, props->core_rev,
+			      props->pvdec_core_id,
+			      props->mmu_config0,
+			      props->mmu_config1,
+			      props->pixel_pipe_cfg,
+			      props->pixel_misc_cfg,
+			      props->pixel_max_frame_cfg);
+}
+
+int vxd_get_pictattrs(unsigned int flags, struct vxd_pict_attrs *pict_attrs)
+{
+	if (flags & (VXD_FW_MSG_FLAG_DWR | VXD_FW_MSG_FLAG_FATAL))
+		pict_attrs->dwrfired = 1;
+	if (flags & VXD_FW_MSG_FLAG_MMU_FAULT)
+		pict_attrs->mmufault = 1;
+	if (flags & VXD_FW_MSG_FLAG_DEV_ERR)
+		pict_attrs->deverror = 1;
+
+	return 0;
+}
+
+int vxd_get_msgerrattr(unsigned int flags, enum vxd_msg_attr *msg_attr)
+{
+	if (flags & ~VXD_FW_MSG_FLAG_CANCELED)
+		*msg_attr = VXD_MSG_ATTR_FATAL;
+	else if (flags & VXD_FW_MSG_FLAG_CANCELED)
+		*msg_attr = VXD_MSG_ATTR_CANCELED;
+	else
+		*msg_attr = VXD_MSG_ATTR_NONE;
+
+	return 0;
+}
+
+int vxd_set_msgflag(enum vxd_msg_flag input_flag, unsigned int *flags)
+{
+	switch (input_flag) {
+	case VXD_MSG_FLAG_DROP:
+		*flags |= VXD_FW_MSG_FLAG_DROP;
+		break;
+	case VXD_MSG_FLAG_EXCL:
+		*flags |= VXD_FW_MSG_FLAG_EXCL;
+		break;
+	default:
+		return IMG_ERROR_FATAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/staging/media/vxd/decoder/vxd_int.h b/drivers/staging/media/vxd/decoder/vxd_int.h
new file mode 100644
index 000000000000..a294e0d6044f
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vxd_int.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD DEC Common low level core interface component
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef _VXD_INT_H
+#define _VXD_INT_H
+
+#include "fw_interface.h"
+#include "scaler_setup.h"
+#include "vdecdd_defs.h"
+#include "vdecfw_shared.h"
+#include "vdec_defs.h"
+#include "vxd_ext.h"
+#include "vxd_props.h"
+
+/*
+ * Size of buffer used for batching messages
+ */
+#define BATCH_MSG_BUFFER_SIZE           (8 * 4096)
+
+#define INTRA_BUF_SIZE                  (1024 * 32)
+#define AUX_LINE_BUFFER_SIZE            (512 * 1024)
+
+#define MAX_PICTURE_WIDTH               (4096)
+#define MAX_PICTURE_HEIGHT              (4096)
+
+/*
+ * this macro returns the host address of a device buffer.
+ */
+#define GET_HOST_ADDR(buf) ((buf)->dev_virt)
+
+#define GET_HOST_ADDR_OFFSET(buf, offset) (((buf)->dev_virt) + (offset))
+
+/*
+ * The extended stride alignment for VXD.
+ */
+#define VDEC_VXD_EXT_STRIDE_ALIGNMENT_DEFAULT  (64)
+
+struct vxd_buffers {
+	struct vdecdd_ddpict_buf *recon_pict;
+	struct vdecdd_ddpict_buf *alt_pict;
+	struct vidio_ddbufinfo *intra_bufinfo;
+	struct vidio_ddbufinfo *auxline_bufinfo;
+	struct vidio_ddbufinfo *err_pict_bufinfo;
+	unsigned int intra_bufsize_per_pipe;
+	unsigned int auxline_bufsize_per_pipe;
+	struct vidio_ddbufinfo *msb_bufinfo;
+	unsigned char btwopass;
+};
+
+struct pvdec_core_rev {
+	unsigned int maj_rev;
+	unsigned int min_rev;
+	unsigned int maint_rev;
+	unsigned int int_rev;
+};
+
+/*
+ * this has all that it needs to translate a stream unit for a picture
+ * into commands for the alternative output picture.
+ */
+void vxd_set_altpictcmds(const struct vdecdd_str_unit *str_unit,
+			 const struct vdec_str_configdata *str_configdata,
+			 const struct vdec_str_opconfig *output_config,
+			 const struct vxd_coreprops *coreprops,
+			 const struct vxd_buffers *buffers,
+			 unsigned int *pict_cmds);
+
+/*
+ * this has all that it needs to translate a stream unit for
+ * a picture into commands for the reconstructed picture.
+ */
+void vxd_set_reconpictcmds(const struct vdecdd_str_unit *str_unit,
+			   const struct vdec_str_configdata *str_configdata,
+			   const struct vdec_str_opconfig *output_config,
+			   const struct vxd_coreprops *coreprops,
+			   const struct vxd_buffers *buffers,
+			   unsigned int *pict_cmds);
+
+int vxd_getscalercmds(const struct scaler_config *scaler_config,
+		      const struct scaler_pitch *pitch,
+		      const struct scaler_filter *filter,
+		      const struct pixel_pixinfo *out_loop_pixel_info,
+		      struct scaler_params *params,
+		      unsigned int *pict_cmds);
+
+/*
+ * this creates the value of the MSVDX_CMDS_CODED_PICTURE_SIZE register.
+ */
+unsigned int vxd_get_codedpicsize(unsigned short width_min1, unsigned short height_min1);
+
+/*
+ * return HW codec mode based on video standard.
+ */
+unsigned char vxd_get_codedmode(enum vdec_vid_std vidstd);
+
+/*
+ * translates core properties into a struct vxd_coreprops.
+ */
+void vxd_get_coreproperties(void *hndl_coreproperties,
+			    struct vxd_coreprops *vxd_coreprops);
+
+/*
+ * translates picture attribute flags into a struct vxd_pict_attrs.
+ */
+int vxd_get_pictattrs(unsigned int flags, struct vxd_pict_attrs *pict_attrs);
+
+/*
+ * translates message flags into an enum vxd_msg_attr value.
+ */
+int vxd_get_msgerrattr(unsigned int flags, enum vxd_msg_attr *msg_attr);
+
+/*
+ * sets a message flag.
+ */
+int vxd_set_msgflag(enum vxd_msg_flag input_flag, unsigned int *flags);
+
+#endif /* _VXD_INT_H */
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 08/30] v4l: vxd-dec: Add translation control modules
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (6 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 07/30] v4l: vxd-dec: Add vxd core module sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 09/30] v4l: vxd-dec: Add idgen api modules sidraya.bj
                   ` (23 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This patch adds the control allocation buffer for the firmware;
it takes the data from the decoder module and sends it to the
firmware through the hardware control module.

It prepares all the standard headers, DMA transfer commands and
VLC table information, and sends them to the firmware.
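
A rough sketch of the flow implemented by this file (an overview of
the helpers added here; the exact call order is driven by the
translation entry point later in the file):

  translation_setup_std_header()       - standard-specific FW header
  translation_pvdec_ctrl_setuphdr()    - control allocation header
  translation_pvdec_adddma_transfers() - bitstream segment DMA command
  translation_pvdecsetup_vlcdma()      - VLC table DMA command
  translation_pvdecsetup_vlctables()   - VLC index register chunks
  translation_pvdec_setup_commands()   - rendec register chunks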

Signed-off-by: Amit Makani <amit.makani@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |    2 +
 .../media/vxd/decoder/translation_api.c       | 1725 +++++++++++++++++
 .../media/vxd/decoder/translation_api.h       |   42 +
 3 files changed, 1769 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/translation_api.c
 create mode 100644 drivers/staging/media/vxd/decoder/translation_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 7b21ebfc61d4..538faa644d13 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19545,6 +19545,8 @@ F:	drivers/staging/media/vxd/common/imgmmu.h
 F:	drivers/staging/media/vxd/decoder/hw_control.c
 F:	drivers/staging/media/vxd/decoder/hw_control.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
+F:	drivers/staging/media/vxd/decoder/translation_api.c
+F:	drivers/staging/media/vxd/decoder/translation_api.h
 F:	drivers/staging/media/vxd/decoder/vxd_core.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.h
diff --git a/drivers/staging/media/vxd/decoder/translation_api.c b/drivers/staging/media/vxd/decoder/translation_api.c
new file mode 100644
index 000000000000..af8924bb5173
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/translation_api.c
@@ -0,0 +1,1725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VDECDD translation APIs.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+/* As of now we are defining HAS_H264 */
+#define HAS_H264
+#define VDEC_USE_PVDEC
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "fw_interface.h"
+#ifdef HAS_H264
+#include "h264fw_data.h"
+#endif /* HAS_H264 */
+#include "hw_control.h"
+#include "img_errors.h"
+#include "img_msvdx_cmds.h"
+#include "img_msvdx_vec_regs.h"
+#ifdef VDEC_USE_PVDEC
+#include "pvdec_int.h"
+#include "img_pvdec_core_regs.h"
+#endif
+#include "img_video_bus4_mmu_regs.h"
+#include "lst.h"
+#include "reg_io2.h"
+#include "rman_api.h"
+#include "translation_api.h"
+#include "vdecdd_defs.h"
+#include "vdecdd_utils.h"
+#include "vdecfw_share.h"
+#include "vxd_int.h"
+#include "vxd_props.h"
+
+#ifdef HAS_HEVC
+#include "hevcfw_data.h"
+#include "pvdec_entropy_regs.h"
+#include "pvdec_vec_be_regs.h"
+#endif
+
+#ifdef HAS_JPEG
+#include "jpegfw_data.h"
+#endif /* HAS_JPEG */
+
+#define NO_VALUE        0
+
+/*
+ * Discontinuity in layout of VEC_VLC_TABLE* registers.
+ * Address of VEC_VLC_TABLE_ADDR16 does not immediately follow
+ * VEC_VLC_TABLE_ADDR15, see TRM.
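+ * As a result, the chunk of VLC table addresses written to the control
+ * allocation has to be split after VEC_VLC_TABLE_ADDR_PT1_SIZE words
+ * (i.e. VEC_VLC_TABLE_ADDR_DISCONT table entries); see
+ * translation_pvdecsetup_vlctables().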
+ */
+#define VEC_VLC_TABLE_ADDR_PT1_SIZE  16 /* in 32-bit words */
+#define VEC_VLC_TABLE_ADDR_DISCONT   (VEC_VLC_TABLE_ADDR_PT1_SIZE * \
+	PVDECIO_VLC_IDX_ADDR_PARTS)
+
+/*
+ * now this can be done via VXD_GetCodecMode;
+ * the standard is implied from the operating mode.
+ * As of now only H264 is supported throughout this file.
+ */
+#define CODEC_MODE_JPEG         0x0
+#define CODEC_MODE_H264         0x1
+#define CODEC_MODE_REAL8        0x8
+#define CODEC_MODE_REAL9        0x9
+
+/*
+ * This enum defines values of ENTDEC_BE_MODE field of VEC_ENTDEC_BE_CONTROL
+ * register and ENTDEC_FE_MODE field of VEC_ENTDEC_FE_CONTROL register.
+ */
+enum decode_mode {
+	/* JPEG */
+	VDEC_ENTDEC_MODE_JPEG   = 0x0,
+	/* H264 (MPEG4/AVC) */
+	VDEC_ENTDEC_MODE_H264   = 0x1,
+	VDEC_ENTDEC_MODE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This has all that it needs to translate a Stream Unit for a picture into a
+ * transaction.
+ */
+static int translation_set_buffer(struct vdecdd_ddpict_buf *picbuf,
+				  struct vdecfw_image_buffer *image_buffer)
+{
+	unsigned int i;
+
+	for (i = 0; i < VDEC_PLANE_MAX; i++) {
+		image_buffer->byte_offset[i] =
+			(unsigned int)GET_HOST_ADDR(&picbuf->pict_buf->ddbuf_info) +
+			picbuf->rend_info.plane_info[i].offset;
+		pr_debug("%s image_buffer->byte_offset[%d] = 0x%x\n",
+			 __func__, i, image_buffer->byte_offset[i]);
+	}
+	return IMG_SUCCESS;
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function              translation_hevc_header
+ */
+static int translation_hevc_header(struct vdecdd_picture *picture,
+				   struct dec_decpict *dec_pict,
+				   struct hevcfw_headerdata *header_data)
+{
+	translation_set_buffer(dec_pict->recon_pict, &header_data->primary);
+
+	if (dec_pict->alt_pict)
+		translation_set_buffer(dec_pict->alt_pict, &header_data->alternate);
+
+	VDEC_ASSERT(picture);
+	VDEC_ASSERT(picture->pict_res_int);
+	VDEC_ASSERT(picture->pict_res_int->mb_param_buf);
+	header_data->temporal_outaddr = (unsigned int)GET_HOST_ADDR
+					(&picture->pict_res_int->mb_param_buf->ddbuf_info);
+
+	return IMG_SUCCESS;
+}
+#endif
+
+#ifdef HAS_H264
+static int translation_h264header(struct vdecdd_picture *pspicture,
+				  struct dec_decpict *dec_pict,
+				  struct h264fw_header_data *psheaderdata,
+				  struct vdec_str_configdata *psstrconfigdata)
+{
+	psheaderdata->two_pass_flag = dec_pict->pict_hdr_info->discontinuous_mbs;
+	psheaderdata->disable_mvc = psstrconfigdata->disable_mvc;
+
+	/*
+	 * The MB params base address is currently commented out as it is
+	 * unused; if needed in the future, uncomment it and make the
+	 * allocation for pict_res_int.
+	 */
+	/* Obtain the MB parameter address from the stream unit. */
+	if (pspicture->pict_res_int->mb_param_buf) {
+		psheaderdata->mbparams_base_address =
+		(unsigned int)GET_HOST_ADDR(&pspicture->pict_res_int->mb_param_buf->ddbuf_info);
+		psheaderdata->mbparams_size_per_plane =
+			pspicture->pict_res_int->mb_param_buf->ddbuf_info.buf_size / 3;
+	} else {
+		psheaderdata->mbparams_base_address = 0;
+		psheaderdata->mbparams_size_per_plane = 0;
+	}
+	psheaderdata->slicegroupmap_base_address =
+		(unsigned int)GET_HOST_ADDR(&dec_pict->cur_pict_dec_res->h264_sgm_buf);
+
+	translation_set_buffer(dec_pict->recon_pict, &psheaderdata->primary);
+
+	if (dec_pict->alt_pict)
+		translation_set_buffer(dec_pict->alt_pict, &psheaderdata->alternate);
+
+	/* Signal whether we have PPS for the second field. */
+	if (pspicture->dec_pict_aux_info.second_pps_id == BSPP_INVALID)
+		psheaderdata->second_pps = 0;
+	else
+		psheaderdata->second_pps = 1;
+
+	return IMG_SUCCESS;
+}
+#endif /* HAS_H264 */
+
+#ifdef HAS_JPEG
+
+static int translation_jpegheader(const struct bspp_sequ_hdr_info *seq,
+				  const struct dec_decpict *dec_pict,
+				  const struct bspp_pict_hdr_info *pict_hdrinfo,
+				  struct jpegfw_header_data *header_data)
+{
+	unsigned int i;
+
+	/* Output picture planes addresses */
+	for (i = 0; i < seq->com_sequ_hdr_info.pixel_info.num_planes; i++) {
+		header_data->plane_offsets[i] =
+			(unsigned int)GET_HOST_ADDR(&dec_pict->recon_pict->pict_buf->ddbuf_info) +
+			dec_pict->recon_pict->rend_info.plane_info[i].offset;
+	}
+
+	/* copy the expected SOS fields number */
+	header_data->hdr_sos_count = pict_hdrinfo->sos_count;
+
+	translation_set_buffer(dec_pict->recon_pict, &header_data->primary);
+
+	return IMG_SUCCESS;
+}
+#endif /* HAS_JPEG */
+/*
+ * This function translates the host video standard enum (enum vdec_vid_std)
+ * into the firmware video standard enum (enum vdecfw_codectype).
+ */
+static int translation_get_codec(enum vdec_vid_std evidstd,
+				 enum vdecfw_codectype *pecodec)
+{
+	enum vdecfw_codectype ecodec = VDEC_CODEC_NONE;
+	unsigned int result = IMG_ERROR_NOT_SUPPORTED;
+
+	/* Translate from video standard to firmware codec. */
+	switch (evidstd) {
+	#ifdef HAS_H264
+	case VDEC_STD_H264:
+		ecodec = VDECFW_CODEC_H264;
+		result = IMG_SUCCESS;
+		break;
+	#endif /* HAS_H264 */
+#ifdef HAS_HEVC
+	case VDEC_STD_HEVC:
+		ecodec = VDECFW_CODEC_HEVC;
+		result = IMG_SUCCESS;
+		break;
+#endif /* HAS_HEVC */
+#ifdef HAS_JPEG
+	case VDEC_STD_JPEG:
+		ecodec = VDECFW_CODEC_JPEG;
+		result = IMG_SUCCESS;
+		break;
+#endif
+	default:
+		result = IMG_ERROR_NOT_SUPPORTED;
+		break;
+	}
+	*pecodec = ecodec;
+	return result;
+}
+
+/*
+ * This function is used to obtain the buffer for the sequence header.
+ */
+static int translation_get_seqhdr(struct vdecdd_str_unit *psstrunit,
+				  struct dec_decpict *psdecpict,
+				  unsigned int *puipseqaddr)
+{
+	/*
+	 * Send sequence info only if it is the first picture of a sequence,
+	 * or the start of a closed GOP.
+	 */
+	if (psstrunit->pict_hdr_info->first_pic_of_sequence || psstrunit->closed_gop) {
+		struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+		/* Get access to map info context */
+		int result = rman_get_resource(psstrunit->seq_hdr_info->bufmap_id,
+					       VDECDD_BUFMAP_TYPE_ID,
+					       (void **)&ddbuf_map_info, NULL);
+		VDEC_ASSERT(result == IMG_SUCCESS);
+		if (result != IMG_SUCCESS)
+			return result;
+
+		*puipseqaddr = GET_HOST_ADDR_OFFSET(&ddbuf_map_info->ddbuf_info,
+						    psstrunit->seq_hdr_info->buf_offset);
+	} else {
+		*puipseqaddr = 0;
+	}
+	return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to obtain the buffer for the picture parameter set.
+ */
+static int translation_get_ppshdr(struct vdecdd_str_unit *psstrunit,
+				  struct dec_decpict *psdecpict,
+				  unsigned int *puipppsaddr)
+{
+	if (psstrunit->pict_hdr_info->pict_aux_data.id != BSPP_INVALID) {
+		struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+		int result;
+
+		VDEC_ASSERT(psstrunit->pict_hdr_info->pict_aux_data.pic_data);
+		/* Get access to map info context */
+		result = rman_get_resource(psstrunit->pict_hdr_info->pict_aux_data.bufmap_id,
+					   VDECDD_BUFMAP_TYPE_ID,
+					   (void **)&ddbuf_map_info, NULL);
+		VDEC_ASSERT(result == IMG_SUCCESS);
+
+		if (result != IMG_SUCCESS)
+			return result;
+		*puipppsaddr =
+			GET_HOST_ADDR_OFFSET(&ddbuf_map_info->ddbuf_info,
+					     psstrunit->pict_hdr_info->pict_aux_data.buf_offset);
+	} else {
+		*puipppsaddr = 0;
+	}
+	return IMG_SUCCESS;
+}
+
+/*
+ * This function is used to obtain the buffer for the second picture
+ * parameter set.
+ */
+static int translation_getsecond_ppshdr(struct vdecdd_str_unit *psstrunit,
+					unsigned int *puisecond_ppshdr)
+{
+	if (psstrunit->pict_hdr_info->second_pict_aux_data.id !=
+		BSPP_INVALID) {
+		struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+		int result;
+		void *pic_data =
+			psstrunit->pict_hdr_info->second_pict_aux_data.pic_data;
+
+		VDEC_ASSERT(pic_data);
+		result = rman_get_resource(psstrunit->pict_hdr_info->second_pict_aux_data.bufmap_id,
+					   VDECDD_BUFMAP_TYPE_ID,
+					   (void **)&ddbuf_map_info, NULL);
+		VDEC_ASSERT(result == IMG_SUCCESS);
+
+		if (result != IMG_SUCCESS)
+			return result;
+
+		*puisecond_ppshdr =
+			GET_HOST_ADDR_OFFSET
+				(&ddbuf_map_info->ddbuf_info,
+				 psstrunit->pict_hdr_info->second_pict_aux_data.buf_offset);
+	} else {
+		*puisecond_ppshdr = 0;
+	}
+	return IMG_SUCCESS;
+}
+
+/*
+ * Returns address from which FW should download its shared context.
+ */
+static unsigned int translation_getctx_loadaddr(struct dec_decpict *psdecpict)
+{
+	if (psdecpict->prev_pict_dec_res)
+		return GET_HOST_ADDR(&psdecpict->prev_pict_dec_res->fw_ctx_buf);
+
+	/*
+	 * No previous context exists; using the current context leads to
+	 * problems on replay, so tell the FW to use a clean one.
+	 * Return 0 (NULL as an integer) to avoid pointer-size warnings
+	 * due to type casting.
+	 */
+	return 0;
+}
+
+static void translation_setup_std_header
+	(struct vdec_str_configdata *str_configdata,
+	struct dec_decpict *dec_pict,
+	struct vdecdd_str_unit *str_unit, unsigned int *psr_hdrsize,
+	struct vdecdd_picture *picture, unsigned int *picture_cmds,
+	enum vdecfw_parsermode *parser_mode)
+{
+	switch (str_configdata->vid_std) {
+#ifdef HAS_H264
+	case VDEC_STD_H264:
+	{
+		struct h264fw_header_data *header_data =
+			(struct h264fw_header_data *)
+			dec_pict->hdr_info->ddbuf_info->cpu_virt;
+		*parser_mode = str_unit->pict_hdr_info->parser_mode;
+
+		if (str_unit->pict_hdr_info->parser_mode !=
+			VDECFW_SCP_ONLY) {
+			pr_warn("VDECFW_SCP_ONLY mode supported in PVDEC FW\n");
+		}
+		/* Reset header data. */
+		memset(header_data, 0, sizeof(*(header_data)));
+
+		/* Prepare active parameter sets. */
+		translation_h264header(picture, dec_pict, header_data, str_configdata);
+
+		/* Setup header size in the transaction. */
+		*psr_hdrsize = sizeof(struct h264fw_header_data);
+		break;
+	}
+#endif /* HAS_H264 */
+
+#ifdef HAS_HEVC
+	case VDEC_STD_HEVC:
+	{
+		struct hevcfw_headerdata *header_data =
+			(struct hevcfw_headerdata *)dec_pict->hdr_info->ddbuf_info->cpu_virt;
+		*parser_mode = str_unit->pict_hdr_info->parser_mode;
+
+		/* Reset header data. */
+		memset(header_data, 0, sizeof(*header_data));
+
+		/* Prepare active parameter sets. */
+		translation_hevc_header(picture, dec_pict, header_data);
+
+		/* Setup header size in the transaction. */
+		*psr_hdrsize = sizeof(struct hevcfw_headerdata);
+		break;
+	}
+#endif
+#ifdef HAS_JPEG
+	case VDEC_STD_JPEG:
+	{
+		struct jpegfw_header_data *header_data =
+			(struct jpegfw_header_data *)dec_pict->hdr_info->ddbuf_info->cpu_virt;
+		const struct bspp_sequ_hdr_info *seq = str_unit->seq_hdr_info;
+		const struct bspp_pict_hdr_info *pict_hdr_info = str_unit->pict_hdr_info;
+
+		/* Reset header data. */
+		memset(header_data, 0, sizeof(*(header_data)));
+
+		/* Prepare active parameter sets. */
+		translation_jpegheader(seq, dec_pict, pict_hdr_info, header_data);
+
+		/* Setup header size in the transaction. */
+		*psr_hdrsize = sizeof(struct jpegfw_header_data);
+		break;
+	}
+#endif
+	default:
+		VDEC_ASSERT(NULL == "Unknown standard!");
+		*psr_hdrsize = 0;
+		break;
+	}
+}
+
+#define VDEC_INITIAL_DEVA_DMA_CMD_SIZE 3
+#define VDEC_SINLGE_DEVA_DMA_CMD_SIZE 2
+
+#ifdef VDEC_USE_PVDEC
+/*
+ * Creates the DEVA bitstream segments command and saves it to the
+ * control allocation buffer.
+ */
+static int translation_pvdec_adddma_transfers
+	(struct lst_t *decpic_seglist, unsigned int **dma_cmdbuf,
+	int cmd_bufsize, struct dec_decpict *psdecpict, int eop)
+{
+	/*
+	 * DEVA's bitstream DMA command is made out of chunks with following
+	 * layout ('+' sign is used to mark actual words in command):
+	 *
+	 * + Bitstream HDR, type unsigned int, consists of:
+	 *	- command id (CMD_BITSTREAM_SEGMENTS),
+	 *	- number of segments in this chunk,
+	 *	- optional CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK
+	 *
+	 * + Bitstream total size, type unsigned int,
+	 * represents size of all segments in all chunks
+	 *
+	 * Segments of the following type (can repeat up to
+	 * CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1 times); the size word is
+	 * written before the address, as in the code below:
+	 *
+	 *	+ Bitstream segment size, type unsigned int
+	 *
+	 *	+ Bitstream segment address, type unsigned int
+	 *
+	 * Subsequent chunks are present when
+	 * CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK flag is set in Bitstream HDR.
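+	 *
+	 * Worked example (one chunk, two segments of 0x100 and 0x80 bytes,
+	 * with illustrative device addresses):
+	 *
+	 *	word 0: CMD_BITSTREAM_SEGMENTS | 1	(2 segments; EOP mask
+	 *						 OR'ed in on last chunk)
+	 *	word 1: 0x180				(total bitstream size)
+	 *	word 2: 0x100				(segment 0 size)
+	 *	word 3: segment 0 device address
+	 *	word 4: 0x80				(segment 1 size)
+	 *	word 5: segment 1 device address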
+	 */
+	struct dec_decpict_seg *dec_picseg = (struct dec_decpict_seg *)lst_first(decpic_seglist);
+	unsigned int *cmd = *dma_cmdbuf;
+	unsigned int *dma_hdr = cmd;
+	unsigned int segcount = 0;
+	unsigned int bitstream_size = 0;
+
+	/*
+	 * Two words for the DMA command header (set up later, once we know
+	 * the count of BS segments).
+	 */
+	cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+	cmd_bufsize -= CMD_BITSTREAM_HDR_DW_SIZE;
+	if (cmd_bufsize < 0) {
+		pr_err("Buffer for DMA command too small.\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	if (!dec_picseg) {
+		/* No segments to be sent to FW: prepare a fake one */
+		cmd_bufsize -= VDEC_SINLGE_DEVA_DMA_CMD_SIZE;
+		if (cmd_bufsize < 0) {
+			pr_err("Buffer for DMA command too small.\n");
+			return IMG_ERROR_INVALID_PARAMETERS;
+		}
+		segcount++;
+
+		/* zero the bitstream segment size and address words */
+		*(cmd++) = 0;
+		*(cmd++) = 0;
+	}
+
+	/* Loop through all bitstream segments */
+	while (dec_picseg) {
+		if (dec_picseg->bstr_seg && (dec_picseg->bstr_seg->bstr_seg_flag
+			& VDECDD_BSSEG_SKIP) == 0) {
+			unsigned int result;
+			struct vdecdd_ddbuf_mapinfo *ddbuf_map_info;
+
+			segcount++;
+			/* Two words for each added bitstream segment */
+			cmd_bufsize -= VDEC_SINLGE_DEVA_DMA_CMD_SIZE;
+			if (cmd_bufsize < 0) {
+				pr_err("Buffer for DMA command too small.\n");
+				return IMG_ERROR_INVALID_PARAMETERS;
+			}
+			/* Insert SCP/SC if needed */
+			if (dec_picseg->bstr_seg->bstr_seg_flag &
+				VDECDD_BSSEG_INSERTSCP) {
+				unsigned int startcode_length =
+					psdecpict->start_code_bufinfo->buf_size;
+
+				if (dec_picseg->bstr_seg->bstr_seg_flag &
+					VDECDD_BSSEG_INSERT_STARTCODE) {
+					unsigned char *start_code =
+						psdecpict->start_code_bufinfo->cpu_virt;
+					start_code[startcode_length - 1] =
+						dec_picseg->bstr_seg->start_code_suffix;
+				} else {
+					startcode_length -= 1;
+				}
+
+				segcount++;
+				*(cmd++) = startcode_length;
+				bitstream_size += startcode_length;
+
+				*(cmd++) = psdecpict->start_code_bufinfo->dev_virt;
+
+				if (((segcount %
+					(CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) == 0))
+					/*
+					 * we have reached max number of
+					 * bitstream segments for the current
+					 * command; make cmd point to the next
+					 * BS command
+					 */
+					cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+			}
+			/* Get access to map info context */
+			result = rman_get_resource(dec_picseg->bstr_seg->bufmap_id,
+						   VDECDD_BUFMAP_TYPE_ID,
+						   (void **)&ddbuf_map_info, NULL);
+			VDEC_ASSERT(result == IMG_SUCCESS);
+			if (result != IMG_SUCCESS)
+				return result;
+
+			*(cmd++) = (dec_picseg->bstr_seg->data_size);
+			bitstream_size += dec_picseg->bstr_seg->data_size;
+
+			*(cmd++) = ddbuf_map_info->ddbuf_info.dev_virt +
+				dec_picseg->bstr_seg->data_byte_offset;
+
+			if (((segcount %
+				(CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) == 0) &&
+				(lst_next(dec_picseg)))
+				/*
+				 * we have reached the max number of bitstream
+				 * segments for the current command; make cmd
+				 * point to the next BS command
+				 */
+				cmd += CMD_BITSTREAM_HDR_DW_SIZE;
+		}
+		dec_picseg = lst_next(dec_picseg);
+	}
+
+	if (segcount > CMD_BITSTREAM_SEGMENTS_MAX_NUM) {
+		pr_err("Too many bitstream segments to transfer.\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	while (segcount > (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1)) {
+		*dma_hdr++ = CMD_BITSTREAM_SEGMENTS |
+			CMD_BITSTREAM_SEGMENTS_MORE_FOLLOW_MASK |
+			CMD_BITSTREAM_SEGMENTS_MINUS1_MASK;
+		*dma_hdr++ = bitstream_size;
+		/*
+		 * make dma_hdr point to the next chunk by skipping the
+		 * bitstream segments
+		 */
+		dma_hdr += (2 * (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1));
+		segcount -= (CMD_BITSTREAM_SEGMENTS_MINUS1_MASK + 1);
+	}
+	*dma_hdr = eop ? CMD_BITSTREAM_EOP_MASK : 0;
+	*dma_hdr++ |= CMD_BITSTREAM_SEGMENTS | (segcount - 1);
+	*dma_hdr = bitstream_size;
+
+	/*
+	 * Let the caller know where we finished: pointer to the location one
+	 * word past the end of our command buffer.
+	 */
+	*dma_cmdbuf = cmd;
+	return IMG_SUCCESS;
+}
+
+/*
+ * Creates DEVA control allocation buffer header.
+ */
+static void translation_pvdec_ctrl_setuphdr
+	(struct ctrl_alloc_header *ctrlalloc_hdr,
+	unsigned int *pic_cmds)
+{
+	ctrlalloc_hdr->cmd_additional_params = CMD_CTRL_ALLOC_HEADER;
+	ctrlalloc_hdr->ext_opmode = pic_cmds[VDECFW_CMD_EXT_OP_MODE];
+	ctrlalloc_hdr->chroma_strides =
+		pic_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE];
+	ctrlalloc_hdr->alt_output_addr[0] =
+		pic_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+	ctrlalloc_hdr->alt_output_addr[1] =
+		pic_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+	ctrlalloc_hdr->alt_output_flags =
+		pic_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+}
+
+/*
+ * Creates the DEVA VLC DMA command and saves it to the control allocation
+ * buffer.
+ */
+static int translation_pvdecsetup_vlcdma
+	(struct vidio_ddbufinfo *vlctables_bufinfo,
+	unsigned int **dmacmd_buf, unsigned int cmdbuf_size)
+{
+	unsigned int cmd_dma;
+	unsigned int *cmd = *dmacmd_buf;
+
+	/* Check if VLC tables fit in one DMA transfer */
+	if (vlctables_bufinfo->buf_size > CMD_DMA_DMA_SIZE_MASK) {
+		pr_err("VLC tables won't fit into one DMA transfer!\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* Check if we have enough space in control allocation buffer. */
+	if (cmdbuf_size < VDEC_SINLGE_DEVA_DMA_CMD_SIZE) {
+		pr_err("Buffer for DMA command too small.\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* Construct DMA command */
+	cmd_dma = CMD_DMA | CMD_DMA_TYPE_VLC_TABLE |
+		vlctables_bufinfo->buf_size;
+
+	/* Add command to control allocation */
+	*cmd++ = cmd_dma;
+	*cmd++ = vlctables_bufinfo->dev_virt;
+
+	/*
+	 * Let the caller know where we finished: pointer to the location one
+	 * word past the end of our command buffer.
+	 */
+	*dmacmd_buf = cmd;
+	return IMG_SUCCESS;
+}
+
+/*
+ * Creates DEVA commands for configuring VLC tables and saves them into
+ * control allocation buffer.
+ */
+static int translation_pvdecsetup_vlctables
+	(unsigned short vlc_index_data[][3], unsigned int num_tables,
+	unsigned int **ctrl_allocbuf, unsigned int ctrl_allocsize,
+	unsigned int msvdx_vecoffset)
+{
+	unsigned int i;
+	unsigned int word_count;
+	unsigned int reg_val;
+	unsigned int *ctrl_allochdr;
+
+	unsigned int *ctrl_alloc = *ctrl_allocbuf;
+
+	/* Calculate the number of words needed for VLC control allocations. */
+	/*
+	 * 3 words for control allocation headers (we are writing 3 chunks:
+	 * addresses, widths, opcodes)
+	 */
+	unsigned int req_elems = 3 +
+		(ALIGN(num_tables, PVDECIO_VLC_IDX_WIDTH_PARTS) /
+		PVDECIO_VLC_IDX_WIDTH_PARTS) +
+		(ALIGN(num_tables, PVDECIO_VLC_IDX_ADDR_PARTS) /
+		PVDECIO_VLC_IDX_ADDR_PARTS) +
+		(ALIGN(num_tables, PVDECIO_VLC_IDX_OPCODE_PARTS) /
+		PVDECIO_VLC_IDX_OPCODE_PARTS);
+
+	/*
+	 * Addresses chunk has to be split in two, if number of tables exceeds
+	 * VEC_VLC_TABLE_ADDR_DISCONT (see layout of VEC_VLC_TABLE_ADDR*
+	 * registers in TRM)
+	 */
+	if (num_tables > VEC_VLC_TABLE_ADDR_DISCONT)
+		/* We need additional control allocation header */
+		req_elems += 1;
+
+	if (ctrl_allocsize < req_elems) {
+		pr_err("Buffer for VLC IDX commands too small.\n");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/*
+	 * Write VLC IDX addresses. Chunks for VEC_VLC_TABLE_ADDR[0-15] and
+	 * VEC_VLC_TABLE_ADDR[16-18] registers.
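+	 * Each chunk is one header word (CMD_REGISTER_BLOCK | word count in
+	 * the upper half-word | register offset) followed by the packed data
+	 * words; the word count is patched into the header once the chunk
+	 * is complete.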
+	 */
+	ctrl_allochdr = ctrl_alloc++;
+	*ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+		(MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR0_OFFSET + msvdx_vecoffset);
+	/* Reset the word count. */
+	word_count = 0;
+
+	/* Process VLC index table. */
+	i = 0;
+	reg_val = 0;
+	while (i < num_tables) {
+		VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_ADDR_ID] &
+			~PVDECIO_VLC_IDX_ADDR_MASK) == 0);
+		/* Pack the addresses into a word. */
+		reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_ADDR_ID] &
+			PVDECIO_VLC_IDX_ADDR_MASK) <<
+			((i % PVDECIO_VLC_IDX_ADDR_PARTS) *
+			PVDECIO_VLC_IDX_ADDR_SHIFT));
+
+		/* If we reached the end of VEC_VLC_TABLE_ADDR[0-15] area... */
+		if (i == VEC_VLC_TABLE_ADDR_DISCONT) {
+			/*
+			 * Finalize command header for VEC_VLC_TABLE_ADDR[0-15]
+			 * register chunk.
+			 */
+			*ctrl_allochdr |= word_count << 16;
+			/*
+			 * Reserve and preset command header for
+			 * VEC_VLC_TABLE_ADDR[16-18] register chunk.
+			 */
+			ctrl_allochdr = ctrl_alloc++;
+			*ctrl_allochdr = CMD_REGISTER_BLOCK |
+				CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+				(MSVDX_VEC_CR_VEC_VLC_TABLE_ADDR16_OFFSET +
+				msvdx_vecoffset);
+			/* Reset the word count. */
+			word_count = 0;
+		}
+
+		/*
+		 * If all the addresses are packed in this word or that's the
+		 * last iteration
+		 */
+		if (((i % PVDECIO_VLC_IDX_ADDR_PARTS) ==
+			(PVDECIO_VLC_IDX_ADDR_PARTS - 1)) ||
+			(i == (num_tables - 1))) {
+			/*
+			 * Add VLC table address to this chunk and increase
+			 * words count.
+			 */
+			*ctrl_alloc++ = reg_val;
+			word_count++;
+			/* Reset address value. */
+			reg_val = 0;
+		}
+
+		i++;
+	}
+
+	/*
+	 * Finalize the current command header for VEC_VLC_TABLE_ADDR register
+	 * chunk.
+	 */
+	*ctrl_allochdr |= word_count << 16;
+
+	/*
+	 * Start new commands chunk for VEC_VLC_TABLE_INITIAL_WIDTH[0-3]
+	 * registers.
+	 */
+
+	/*
+	 * Reserve and preset command header for
+	 * VEC_VLC_TABLE_INITIAL_WIDTH[0-3] register chunk.
+	 */
+	ctrl_allochdr = ctrl_alloc++;
+	*ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+		(MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_WIDTH0_OFFSET +
+		msvdx_vecoffset);
+	/* Reset the word count. */
+	word_count = 0;
+
+	/* Process VLC index table. */
+	i = 0;
+	reg_val = 0;
+
+	while (i < num_tables) {
+		VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_WIDTH_ID] &
+			~PVDECIO_VLC_IDX_WIDTH_MASK) == 0);
+		/* Pack the widths into a word. */
+		reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_WIDTH_ID] &
+			PVDECIO_VLC_IDX_WIDTH_MASK) <<
+			(i % PVDECIO_VLC_IDX_WIDTH_PARTS) *
+			PVDECIO_VLC_IDX_WIDTH_SHIFT);
+
+		/*
+		 * If all the widths are packed in this word or that's the last
+		 * iteration.
+		 */
+		if (((i % PVDECIO_VLC_IDX_WIDTH_PARTS) ==
+			(PVDECIO_VLC_IDX_WIDTH_PARTS - 1)) ||
+			(i == (num_tables - 1))) {
+			/*
+			 * Add VLC table width to this chunk and increase words
+			 * count.
+			 */
+			*ctrl_alloc++ = reg_val;
+			word_count++;
+			/* Reset width value. */
+			reg_val = 0;
+		}
+		i++;
+	}
+
+	/*
+	 * Finalize command header for VEC_VLC_TABLE_INITIAL_WIDTH[0-3] register
+	 * chunk.
+	 */
+	*ctrl_allochdr |= word_count << 16;
+
+	/*
+	 * Start new commands chunk for VEC_VLC_TABLE_INITIAL_OPCODE[0-2]
+	 * registers.
+	 * Reserve and preset command header for
+	 * VEC_VLC_TABLE_INITIAL_OPCODE[0-2] register chunk
+	 */
+	ctrl_allochdr = ctrl_alloc++;
+	*ctrl_allochdr = CMD_REGISTER_BLOCK | CMD_REGISTER_BLOCK_FLAG_VLC_DATA |
+		(MSVDX_VEC_CR_VEC_VLC_TABLE_INITIAL_OPCODE0_OFFSET +
+		msvdx_vecoffset);
+	/* Reset the word count. */
+	word_count = 0;
+
+	/* Process VLC index table. */
+	i = 0;
+	reg_val = 0;
+
+	while (i < num_tables) {
+		VDEC_ASSERT((vlc_index_data[i][PVDECIO_VLC_IDX_OPCODE_ID] &
+			~PVDECIO_VLC_IDX_OPCODE_MASK) == 0);
+		/* Pack the opcodes into a word. */
+		reg_val |= ((vlc_index_data[i][PVDECIO_VLC_IDX_OPCODE_ID] &
+			PVDECIO_VLC_IDX_OPCODE_MASK) <<
+			(i % PVDECIO_VLC_IDX_OPCODE_PARTS) *
+			PVDECIO_VLC_IDX_OPCODE_SHIFT);
+
+		/*
+		 * If all the opcodes are packed in this word or that's the last
+		 * iteration.
+		 */
+		if (((i % PVDECIO_VLC_IDX_OPCODE_PARTS) ==
+			(PVDECIO_VLC_IDX_OPCODE_PARTS - 1)) ||
+			(i == (num_tables - 1))) {
+			/*
+			 * Add VLC table opcodes to this chunk and increase
+			 * words count.
+			 */
+			*ctrl_alloc++ = reg_val;
+			word_count++;
+			/* Reset opcode value. */
+			reg_val = 0;
+		}
+		i++;
+	}
+
+	/*
+	 * Finalize command header for VEC_VLC_TABLE_INITIAL_OPCODE[0-2]
+	 * register chunk.
+	 */
+	*ctrl_allochdr |= word_count << 16;
+
+	/* Update caller with current location of control allocation pointer */
+	*ctrl_allocbuf = ctrl_alloc;
+	return IMG_SUCCESS;
+}
+
+/*
+ * fills in a rendec command chunk in the command buffer.
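+ * The first argument (num) is the count of arguments that follow it;
+ * the next five are fixed (pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+ * vdmc_cmd_offset, offset) and the remaining num - 5 are VDECFW_CMD_*
+ * indices whose pic_cmds values are copied in after the chunk header.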
+ */
+static void fill_rendec_chunk(int num, ...)
+{
+	va_list valist;
+	unsigned int i, j = 0;
+	unsigned int chunk_word_count = 0;
+	unsigned int used_word_count = 0;
+	int aux_array_size = 0;
+	unsigned int *pic_cmds;
+	unsigned int **ctrl_allocbuf;
+	unsigned int ctrl_allocsize;
+	unsigned int vdmc_cmd_offset;
+	unsigned int offset;
+	unsigned int *buf;
+	/* the first 5 arguments passed to fill_rendec_chunk are fixed */
+	enum vdecfw_picture_cmds *aux_array = kmalloc((sizeof(unsigned int) *
+			(num - 5)), GFP_KERNEL);
+	if (!aux_array)
+		return;
+
+	/* initialize valist for num number of arguments */
+	va_start(valist, num);
+
+	pic_cmds = va_arg(valist, unsigned int *);
+	ctrl_allocbuf = va_arg(valist, unsigned int **);
+	ctrl_allocsize = va_arg(valist, unsigned int);
+	vdmc_cmd_offset = va_arg(valist, unsigned int);
+	offset = va_arg(valist, unsigned int);
+	buf = *ctrl_allocbuf;
+
+	aux_array_size = (sizeof(unsigned int) * (num - 5));
+	/*
+	 * access the remaining arguments assigned to valist; the first
+	 * 5 have already been read
+	 */
+	for (i = 6, j = 0; i <= num; i++, j++)
+		aux_array[j] = (enum vdecfw_picture_cmds)va_arg(valist, int);
+
+	/* clean memory reserved for valist */
+	va_end(valist);
+	chunk_word_count = aux_array_size /
+		sizeof(enum vdecfw_picture_cmds);
+	if ((chunk_word_count + 1) > (ctrl_allocsize - used_word_count)) {
+		kfree(aux_array);
+		return;
+	}
+	if ((chunk_word_count & ~(CMD_RENDEC_WORD_COUNT_MASK >>
+		CMD_RENDEC_WORD_COUNT_SHIFT)) != 0) {
+		kfree(aux_array);
+		return;
+	}
+	used_word_count += chunk_word_count + 1;
+	*buf++ = CMD_RENDEC_BLOCK | (chunk_word_count << 16) |
+		(vdmc_cmd_offset + offset);
+
+	for (i = 0; i < chunk_word_count; i++)
+		*buf++ = pic_cmds[aux_array[i]];
+
+	*ctrl_allocbuf = buf;
+	/* free the memory */
+	kfree(aux_array);
+}
+
+/*
+ * Creates DEVA commands for configuring rendec and writes them into control
+ * allocation buffer.
+ */
+static void translation_pvdec_setup_commands(unsigned int *pic_cmds,
+					     unsigned int **ctrl_allocbuf,
+					     unsigned int ctrl_allocsize,
+					     unsigned int vdmc_cmd_offset)
+{
+	unsigned int codec_mode;
+
+	codec_mode = REGIO_READ_FIELD(pic_cmds[VDECFW_CMD_OPERATING_MODE],
+				      MSVDX_CMDS, OPERATING_MODE, CODEC_MODE);
+
+	if (codec_mode != CODEC_MODE_H264)
+		/* chunk with cache settings at 0x01C */
+		/*
+		 * the first argument (6) is the number of arguments that
+		 * follow it in this fill_rendec_chunk call.
+		 */
+		fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+				  vdmc_cmd_offset,
+				  MSVDX_CMDS_MC_CACHE_CONFIGURATION_OFFSET,
+				  VDECFW_CMD_MC_CACHE_CONFIGURATION);
+
+	/* chunk with extended row stride at 0x03C */
+	/*
+	 * the first argument (6) is the number of arguments that
+	 * follow it in this fill_rendec_chunk call.
+	 */
+	fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+			  vdmc_cmd_offset,
+			  MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET,
+			  VDECFW_CMD_EXTENDED_ROW_STRIDE);
+
+	/* chunk with alternative output control at 0x1B4 */
+	/*
+	 * the first argument (6) is the number of arguments that
+	 * follow it in this fill_rendec_chunk call.
+	 */
+	fill_rendec_chunk(6, pic_cmds, ctrl_allocbuf, ctrl_allocsize,
+			  vdmc_cmd_offset,
+			  MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_OFFSET,
+			  VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL);
+
+	/* scaling chunks */
+	if (pic_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE]) {
+		if (codec_mode != CODEC_MODE_REAL8 && codec_mode != CODEC_MODE_REAL9) {
+			/*
+			 * chunk with scale display size, scale H/V control at
+			 * 0x0050
+			 */
+			/*
+			 * the first argument (8) is the number of arguments
+			 * that follow it in this fill_rendec_chunk call.
+			 */
+			fill_rendec_chunk(8, pic_cmds, ctrl_allocbuf,
+					  ctrl_allocsize, vdmc_cmd_offset,
+					  MSVDX_CMDS_SCALED_DISPLAY_SIZE_OFFSET,
+					  VDECFW_CMD_SCALED_DISPLAY_SIZE,
+					  VDECFW_CMD_HORIZONTAL_SCALE_CONTROL,
+					  VDECFW_CMD_VERTICAL_SCALE_CONTROL);
+
+			/* chunk with luma/chorma H/V coeffs at 0x0060 */
+			/*
+			 * the first argument (21) is the number of arguments
+			 * that follow it in this fill_rendec_chunk call.
+			 */
+			fill_rendec_chunk(21, pic_cmds, ctrl_allocbuf,
+					  ctrl_allocsize, vdmc_cmd_offset,
+					  MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET,
+					  VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_0,
+					  VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_1,
+					  VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_2,
+					  VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_3,
+					  VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_0,
+					  VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_1,
+					  VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_2,
+					  VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_3,
+					  VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_0,
+					  VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_1,
+					  VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_2,
+					  VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_3,
+					  VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_0,
+					  VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_1,
+					  VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_2,
+					  VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_3);
+
+			/*
+			 * chunk with scale output size, scale H/V chroma at
+			 * 0x01B8
+			 */
+			/*
+			 * the first argument (8) is the number of arguments
+			 * that follow it in this fill_rendec_chunk call.
+			 */
+			fill_rendec_chunk(8, pic_cmds, ctrl_allocbuf,
+					  ctrl_allocsize, vdmc_cmd_offset,
+					  MSVDX_CMDS_SCALE_OUTPUT_SIZE_OFFSET,
+					  VDECFW_CMD_SCALE_OUTPUT_SIZE,
+					  VDECFW_CMD_SCALE_HORIZONTAL_CHROMA,
+					  VDECFW_CMD_SCALE_VERTICAL_CHROMA);
+		}
+	}
+}
+
+#ifdef HAS_HEVC
+/*
+ * @Function		translation_pvdec_setup_pvdec_commands
+ */
+static int translation_pvdec_setup_pvdec_commands(struct vdecdd_picture *picture,
+						  struct dec_decpict *dec_pict,
+						  struct vdecdd_str_unit *str_unit,
+						  struct decoder_regsoffsets *regs_offsets,
+						  unsigned int **ctrl_allocbuf,
+						  unsigned int ctrl_alloc_size,
+						  unsigned int *mem_to_reg_host_part,
+						  unsigned int *pict_cmds)
+{
+	const unsigned int genc_buf_cnt = 4;
+	/* We have two chunks: for GENC buffer addresses and sizes */
+	const unsigned int genc_conf_items = 2;
+	const unsigned int pipe = 0xf << 16; /* Instruct H/W to write to current pipe */
+	/* We need to configure address and size of each GENC buffer */
+	const unsigned int genc_words_cnt = genc_buf_cnt * genc_conf_items;
+	struct vdecdd_ddbuf_mapinfo **genc_buffers =
+		picture->pict_res_int->seq_resint->genc_buffers;
+	unsigned int memto_reg_used;  /* in bytes */
+	unsigned int i;
+	unsigned int *ctrl_alloc = *ctrl_allocbuf;
+	unsigned int *mem_to_reg = (unsigned int *)dec_pict->pvdec_info->ddbuf_info->cpu_virt;
+	unsigned int reg = 0;
+
+	if (ctrl_alloc_size < genc_words_cnt + genc_conf_items) {
+		pr_err("Buffer for GENC config too small.");
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* Insert command header for GENC buffers sizes */
+	*ctrl_alloc++ = CMD_REGISTER_BLOCK | (genc_buf_cnt << 16) |
+		(PVDEC_ENTROPY_CR_GENC_BUFFER_SIZE_OFFSET + regs_offsets->entropy_offset);
+	for (i = 0; i < genc_buf_cnt; i++)
+		*ctrl_alloc++ = genc_buffers[i]->ddbuf_info.buf_size;
+
+	/* Insert command header for GENC buffers addresses */
+	*ctrl_alloc++ = CMD_REGISTER_BLOCK | (genc_buf_cnt << 16) |
+		(PVDEC_ENTROPY_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->entropy_offset);
+	for (i = 0; i < genc_buf_cnt; i++)
+		*ctrl_alloc++ = genc_buffers[i]->ddbuf_info.dev_virt;
+
+	/* Insert GENC fragment buffer address */
+	*ctrl_alloc++ = CMD_REGISTER_BLOCK | (1 << 16) |
+		(PVDEC_ENTROPY_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET + regs_offsets->entropy_offset);
+	*ctrl_alloc++ = picture->pict_res_int->genc_fragment_buf->ddbuf_info.dev_virt;
+
+	/* Return current location in control allocation buffer to caller */
+	*ctrl_allocbuf = ctrl_alloc;
+
+	reg = 0;
+	REGIO_WRITE_FIELD_LITE
+		(reg,
+		 MSVDX_CMDS, PVDEC_DISPLAY_PICTURE_SIZE, PVDEC_DISPLAY_PICTURE_WIDTH_MIN1,
+		 str_unit->pict_hdr_info->coded_frame_size.width - 1, unsigned int);
+	REGIO_WRITE_FIELD_LITE
+		(reg,
+		 MSVDX_CMDS, PVDEC_DISPLAY_PICTURE_SIZE, PVDEC_DISPLAY_PICTURE_HEIGHT_MIN1,
+		 str_unit->pict_hdr_info->coded_frame_size.height - 1, unsigned int);
+
+	/*
+	 * Pvdec operating mode needs to be submitted before any other commands.
+	 * This will be set in FW. Make sure it's the first command in Mem2Reg buffer.
+	 */
+	VDEC_ASSERT((unsigned int *)dec_pict->pvdec_info->ddbuf_info->cpu_virt == mem_to_reg);
+
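+	/*
+	 * Each Mem2Reg entry below is a pair of words: a register offset
+	 * (tagged with the pipe mask) followed by the value the firmware
+	 * writes to that register.
+	 */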
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_PVDEC_OPERATING_MODE_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = 0x0; /* has to be updated in the F/W */
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_MC_CACHE_CONFIGURATION_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = 0x0; /* has to be updated in the F/W */
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_PVDEC_DISPLAY_PICTURE_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = reg;
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_PVDEC_CODED_PICTURE_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = reg;
+
+	/* scaling configuration */
+	if (pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE]) {
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_PVDEC_SCALED_DISPLAY_SIZE_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_HORIZONTAL_SCALE_CONTROL_OFFSET +
+			 regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL];
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_VERTICAL_SCALE_CONTROL_OFFSET + regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_SCALE_OUTPUT_SIZE_OFFSET + regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_SCALE_HORIZONTAL_CHROMA_OFFSET + regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_HORIZONTAL_CHROMA];
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_SCALE_VERTICAL_CHROMA_OFFSET + regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_SCALE_VERTICAL_CHROMA];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_0];
+		*mem_to_reg++ = pipe |
+			(4 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_1];
+		*mem_to_reg++ = pipe |
+			(8 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_2];
+		*mem_to_reg++ = pipe |
+			(12 + MSVDX_CMDS_HORIZONTAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_LUMA_COEFFICIENTS_3];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_0];
+		*mem_to_reg++ = pipe |
+			(4 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_1];
+		*mem_to_reg++ = pipe |
+			(8 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_2];
+		*mem_to_reg++ = pipe |
+			(12 + MSVDX_CMDS_VERTICAL_LUMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_LUMA_COEFFICIENTS_3];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_0];
+		*mem_to_reg++ = pipe |
+			(4 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_1];
+		*mem_to_reg++ = pipe |
+			(8 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_2];
+		*mem_to_reg++ = pipe |
+			(12 + MSVDX_CMDS_HORIZONTAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_HORIZONTAL_CHROMA_COEFFICIENTS_3];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_0];
+		*mem_to_reg++ = pipe |
+			(4 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_1];
+		*mem_to_reg++ = pipe |
+			(8 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_2];
+		*mem_to_reg++ = pipe |
+			(12 + MSVDX_CMDS_VERTICAL_CHROMA_COEFFICIENTS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_VERTICAL_CHROMA_COEFFICIENTS_3];
+	}
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_EXTENDED_ROW_STRIDE_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_EXTENDED_ROW_STRIDE];
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_ALTERNATIVE_OUTPUT_CONTROL_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_CONTROL];
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION_OFFSET +
+		regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_CHROMA_ROW_STRIDE_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_ROW_STRIDE];
+
+	/* Setup MEM_TO_REG buffer */
+	for (i = 0; i < genc_buf_cnt; i++) {
+		*mem_to_reg++ = pipe | (PVDEC_VEC_BE_CR_GENC_BUFFER_SIZE_OFFSET +
+			regs_offsets->vec_be_regs_offset + i * sizeof(unsigned int));
+		*mem_to_reg++ = genc_buffers[i]->ddbuf_info.buf_size;
+		*mem_to_reg++ = pipe | (PVDEC_VEC_BE_CR_GENC_BUFFER_BASE_ADDRESS_OFFSET +
+			regs_offsets->vec_be_regs_offset + i * sizeof(unsigned int));
+		*mem_to_reg++ = genc_buffers[i]->ddbuf_info.dev_virt;
+	}
+
+	*mem_to_reg++ = pipe |
+		(PVDEC_VEC_BE_CR_GENC_FRAGMENT_BASE_ADDRESS_OFFSET +
+		regs_offsets->vec_be_regs_offset);
+	*mem_to_reg++ = picture->pict_res_int->genc_fragment_buf->ddbuf_info.dev_virt;
+
+	*mem_to_reg++ = pipe |
+		(PVDEC_VEC_BE_CR_ABOVE_PARAM_BASE_ADDRESS_OFFSET +
+		regs_offsets->vec_be_regs_offset);
+
+	*mem_to_reg++ = dec_pict->pvdec_info->ddbuf_info->dev_virt +
+		MEM_TO_REG_BUF_SIZE + SLICE_PARAMS_BUF_SIZE;
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET +
+		regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESSES_OFFSET +
+		regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+	/* alternative picture configuration */
+	if (dec_pict->alt_pict) {
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_VC1_LUMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+
+		*mem_to_reg++ = pipe |
+			(MSVDX_CMDS_VC1_CHROMA_RANGE_MAPPING_BASE_ADDRESS_OFFSET +
+			regs_offsets->vdmc_cmd_offset);
+		*mem_to_reg++ = pict_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+	}
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_AUX_LINE_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS];
+
+	*mem_to_reg++ = pipe |
+		(MSVDX_CMDS_INTRA_BUFFER_BASE_ADDRESS_OFFSET + regs_offsets->vdmc_cmd_offset);
+	*mem_to_reg++ = pict_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS];
+
+	/* Make sure we fit in buffer */
+	memto_reg_used = (unsigned long)mem_to_reg -
+		(unsigned long)dec_pict->pvdec_info->ddbuf_info->cpu_virt;
+
+	VDEC_ASSERT(memto_reg_used < MEM_TO_REG_BUF_SIZE);
+
+	*mem_to_reg_host_part = memto_reg_used / sizeof(unsigned int);
+
+	return IMG_SUCCESS;
+}
+#endif
+
+/*
+ * Fills the VDEC extension command (CMD_VDEC_EXT) with header, context and
+ * picture buffer addresses for the firmware.
+ */
+static int translation_pvdecsetup_vdecext
+	(struct vdec_ext_cmd *vdec_ext,
+	struct dec_decpict *dec_pict, unsigned int *pic_cmds,
+	struct vdecdd_str_unit *str_unit, enum vdec_vid_std vid_std,
+	enum vdecfw_parsermode parser_mode)
+{
+	int result;
+	unsigned int trans_id = dec_pict->transaction_id;
+
+	VDEC_ASSERT(dec_pict->recon_pict);
+
+	vdec_ext->cmd = CMD_VDEC_EXT;
+	vdec_ext->trans_id = trans_id;
+
+	result = translation_get_seqhdr(str_unit, dec_pict, &vdec_ext->seq_addr);
+	VDEC_ASSERT(result == IMG_SUCCESS);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	result = translation_get_ppshdr(str_unit, dec_pict, &vdec_ext->pps_addr);
+	VDEC_ASSERT(result == IMG_SUCCESS);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	result = translation_getsecond_ppshdr(str_unit, &vdec_ext->pps_2addr);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	vdec_ext->hdr_addr = GET_HOST_ADDR(dec_pict->hdr_info->ddbuf_info);
+
+	vdec_ext->ctx_load_addr = translation_getctx_loadaddr(dec_pict);
+	vdec_ext->ctx_save_addr = GET_HOST_ADDR(&dec_pict->cur_pict_dec_res->fw_ctx_buf);
+	vdec_ext->buf_ctrl_addr = GET_HOST_ADDR(&dec_pict->pict_ref_res->fw_ctrlbuf);
+	if (dec_pict->prev_pict_dec_res) {
+		/*
+		 * Copy the previous firmware context to the current one in case
+		 * picture management fails in firmware.
+		 */
+		memcpy(dec_pict->cur_pict_dec_res->fw_ctx_buf.cpu_virt,
+		       dec_pict->prev_pict_dec_res->fw_ctx_buf.cpu_virt,
+		       dec_pict->prev_pict_dec_res->fw_ctx_buf.buf_size);
+	}
+
+	vdec_ext->last_luma_recon =
+		pic_cmds[VDECFW_CMD_LUMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+	vdec_ext->last_chroma_recon =
+		pic_cmds[VDECFW_CMD_CHROMA_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+
+	vdec_ext->luma_err_base =
+		pic_cmds[VDECFW_CMD_LUMA_ERROR_PICTURE_BASE_ADDRESS];
+	vdec_ext->chroma_err_base =
+		pic_cmds[VDECFW_CMD_CHROMA_ERROR_PICTURE_BASE_ADDRESS];
+
+	vdec_ext->scaled_display_size =
+		pic_cmds[VDECFW_CMD_SCALED_DISPLAY_SIZE];
+	vdec_ext->horz_scale_control =
+		pic_cmds[VDECFW_CMD_HORIZONTAL_SCALE_CONTROL];
+	vdec_ext->vert_scale_control =
+		pic_cmds[VDECFW_CMD_VERTICAL_SCALE_CONTROL];
+	vdec_ext->scale_output_size = pic_cmds[VDECFW_CMD_SCALE_OUTPUT_SIZE];
+
+	vdec_ext->intra_buf_base_addr =
+		pic_cmds[VDECFW_CMD_INTRA_BUFFER_BASE_ADDRESS];
+	vdec_ext->intra_buf_size_per_pipe =
+		pic_cmds[VDECFW_CMD_INTRA_BUFFER_SIZE_PER_PIPE];
+	vdec_ext->intra_buf_size_per_plane =
+		pic_cmds[VDECFW_CMD_INTRA_BUFFER_PLANE_SIZE];
+	vdec_ext->aux_line_buffer_base_addr =
+		pic_cmds[VDECFW_CMD_AUX_LINE_BUFFER_BASE_ADDRESS];
+	vdec_ext->aux_line_buf_size_per_pipe =
+		pic_cmds[VDECFW_CMD_AUX_LINE_BUFFER_SIZE_PER_PIPE];
+	vdec_ext->alt_output_pict_rotation =
+		pic_cmds[VDECFW_CMD_ALTERNATIVE_OUTPUT_PICTURE_ROTATION];
+	vdec_ext->chroma2reconstructed_addr =
+		pic_cmds[VDECFW_CMD_CHROMA2_RECONSTRUCTED_PICTURE_BASE_ADDRESS];
+	vdec_ext->luma_alt_addr =
+		pic_cmds[VDECFW_CMD_LUMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+	vdec_ext->chroma_alt_addr =
+		pic_cmds[VDECFW_CMD_CHROMA_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+	vdec_ext->chroma2alt_addr =
+		pic_cmds[VDECFW_CMD_CHROMA2_ALTERNATIVE_PICTURE_BASE_ADDRESS];
+
+	if (vid_std == VDEC_STD_VC1) {
+		struct vidio_ddbufinfo *vlc_idx_tables_bufinfo =
+			dec_pict->vlc_idx_tables_bufinfo;
+		struct vidio_ddbufinfo *vlc_tables_bufinfo =
+			dec_pict->vlc_tables_bufinfo;
+
+		vdec_ext->vlc_idx_table_size = vlc_idx_tables_bufinfo->buf_size;
+		vdec_ext->vlc_idx_table_addr = GET_HOST_ADDR(vlc_idx_tables_bufinfo);
+		vdec_ext->vlc_tables_size = vlc_tables_bufinfo->buf_size;
+		vdec_ext->vlc_tables_addr = GET_HOST_ADDR(vlc_tables_bufinfo);
+	} else {
+		vdec_ext->vlc_idx_table_size = 0;
+		vdec_ext->vlc_idx_table_addr = 0;
+		vdec_ext->vlc_tables_size = 0;
+		vdec_ext->vlc_tables_addr = 0;
+	}
+
+	vdec_ext->display_picture_size = pic_cmds[VDECFW_CMD_DISPLAY_PICTURE];
+	vdec_ext->parser_mode = parser_mode;
+
+	/* miscellaneous flags */
+	vdec_ext->is_chromainterleaved =
+		REGIO_READ_FIELD(pic_cmds[VDECFW_CMD_OPERATING_MODE], MSVDX_CMDS, OPERATING_MODE,
+				 CHROMA_INTERLEAVED);
+	vdec_ext->is_discontinuousmbs =
+		dec_pict->pict_hdr_info->discontinuous_mbs;
+
+#ifdef HAS_HEVC
+	if (dec_pict->pvdec_info) {
+		vdec_ext->mem_to_reg_addr = dec_pict->pvdec_info->ddbuf_info->dev_virt;
+		vdec_ext->slice_params_addr = dec_pict->pvdec_info->ddbuf_info->dev_virt +
+			MEM_TO_REG_BUF_SIZE;
+		vdec_ext->slice_params_size = SLICE_PARAMS_BUF_SIZE;
+	}
+	if (vid_std == VDEC_STD_HEVC) {
+		struct vdecdd_picture *picture = (struct vdecdd_picture *)str_unit->dd_pict_data;
+
+		VDEC_ASSERT(picture);
+		/* 10-bit packed output format indicator */
+		vdec_ext->is_packedformat = picture->op_config.pixel_info.mem_pkg ==
+			PIXEL_BIT10_MP ? 1 : 0;
+	}
+#endif
+	return IMG_SUCCESS;
+}
+
+/*
+ * NOTE:
+ * translation_configure_tiling is not currently supported.
+ */
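+/*
+ * Lays out the control allocation buffer for a picture: bitstream DMA
+ * transfer commands, the control allocation header, the VDEC extension
+ * command, optional VLC table DMA and configuration, per-standard register
+ * commands and a closing CMD_COMPLETION marker.
+ */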
+int translation_ctrl_alloc_prepare(struct vdec_str_configdata *pstr_config_data,
+				   struct vdecdd_str_unit *str_unit,
+				   struct dec_decpict *dec_pict,
+				   const struct vxd_coreprops *core_props,
+				   struct decoder_regsoffsets *regs_offset)
+{
+	int result;
+	unsigned int *cmd_buf;
+	unsigned int hdr_size = 0;
+	unsigned int pict_cmds[VDECFW_CMD_MAX];
+	enum vdecfw_codectype codec;
+	struct vxd_buffers buffers;
+	struct vdec_ext_cmd *vdec_ext;
+	enum vdecfw_parsermode parser_mode = VDECFW_SCP_ONLY;
+	struct vidio_ddbufinfo *batch_msgbuf_info =
+		dec_pict->batch_msginfo->ddbuf_info;
+	struct lst_t *decpic_seg_list = &dec_pict->dec_pict_seg_list;
+	unsigned int memto_reg_host_part = 0;
+
+	unsigned long ctrl_alloc = (unsigned long)batch_msgbuf_info->cpu_virt;
+	unsigned long ctrl_alloc_end = ctrl_alloc + batch_msgbuf_info->buf_size;
+
+	struct vdecdd_picture *picture =
+		(struct vdecdd_picture *)str_unit->dd_pict_data;
+
+	memset(pict_cmds, 0, sizeof(pict_cmds));
+	memset(&buffers, 0, sizeof(buffers));
+
+	VDEC_ASSERT(batch_msgbuf_info->buf_size >= CTRL_ALLOC_MAX_SEGMENT_SIZE);
+	memset(batch_msgbuf_info->cpu_virt, 0, batch_msgbuf_info->buf_size);
+
+	/* Construct transaction based on new picture. */
+	VDEC_ASSERT(str_unit->str_unit_type == VDECDD_STRUNIT_PICTURE_START);
+
+	/* Obtain picture data. */
+	picture = (struct vdecdd_picture *)str_unit->dd_pict_data;
+	dec_pict->recon_pict = &picture->disp_pict_buf;
+
+	result = translation_get_codec(pstr_config_data->vid_std, &codec);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	translation_setup_std_header(pstr_config_data, dec_pict, str_unit, &hdr_size, picture,
+				     pict_cmds, &parser_mode);
+
+	buffers.recon_pict = dec_pict->recon_pict;
+	buffers.alt_pict = dec_pict->alt_pict;
+
+#ifdef HAS_HEVC
+	/* Set pipe offsets to device buffers */
+	if (pstr_config_data->vid_std == VDEC_STD_HEVC) {
+		/* FW in multipipe requires these buffers to be allocated per stream */
+		if (picture->pict_res_int && picture->pict_res_int->seq_resint &&
+		    picture->pict_res_int->seq_resint->intra_buffer &&
+		    picture->pict_res_int->seq_resint->aux_buffer) {
+			buffers.intra_bufinfo =
+				&picture->pict_res_int->seq_resint->intra_buffer->ddbuf_info;
+			buffers.auxline_bufinfo =
+				&picture->pict_res_int->seq_resint->aux_buffer->ddbuf_info;
+		}
+	} else {
+		buffers.intra_bufinfo = dec_pict->intra_bufinfo;
+		buffers.auxline_bufinfo = dec_pict->auxline_bufinfo;
+	}
+
+	if (buffers.intra_bufinfo)
+		buffers.intra_bufsize_per_pipe = buffers.intra_bufinfo->buf_size /
+			core_props->num_pixel_pipes;
+	if (buffers.auxline_bufinfo)
+		buffers.auxline_bufsize_per_pipe = buffers.auxline_bufinfo->buf_size /
+			core_props->num_pixel_pipes;
+#endif
+
+#ifdef ERROR_CONCEALMENT
+	if (picture->pict_res_int && picture->pict_res_int->seq_resint)
+		if (picture->pict_res_int->seq_resint->err_pict_buf)
+			buffers.err_pict_bufinfo =
+				&picture->pict_res_int->seq_resint->err_pict_buf->ddbuf_info;
+#endif
+
+	/*
+	 * Prepare Reconstructed Picture Configuration.
+	 * Note: we are obtaining register values prepared based on header
+	 * files generated from MSVDX *dev files. That is allowed, as the
+	 * layout of the registers MSVDX_CMDS_OPERATING_MODE,
+	 * MSVDX_CMDS_EXTENDED_ROW_STRIDE,
+	 * MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION and
+	 * MSVDX_CMDS_CHROMA_ROW_STRIDE is the same for both MSVDX and PVDEC.
+	 */
+	vxd_set_reconpictcmds(str_unit, pstr_config_data, &picture->op_config, core_props,
+			      &buffers, pict_cmds);
+
+	/* Alternative Picture Configuration */
+	if (dec_pict->alt_pict) {
+		dec_pict->twopass = picture->op_config.force_oold;
+		buffers.btwopass = dec_pict->twopass;
+		/*
+		 * Alternative Picture Configuration.
+		 * Note: we are obtaining register values prepared based on
+		 * header files generated from MSVDX *dev files. That is
+		 * allowed, as the layout of the registers
+		 * MSVDX_CMDS_OPERATING_MODE, MSVDX_CMDS_EXTENDED_ROW_STRIDE,
+		 * MSVDX_CMDS_ALTERNATIVE_OUTPUT_PICTURE_ROTATION and
+		 * MSVDX_CMDS_CHROMA_ROW_STRIDE is the same for both MSVDX
+		 * and PVDEC.
+		 */
+		/*
+		 * Configure second buffer for out-of-loop processing
+		 * (e.g. scaling etc.).
+		 */
+		vxd_set_altpictcmds(str_unit, pstr_config_data, &picture->op_config, core_props,
+				    &buffers, pict_cmds);
+	}
+
+	/*
+	 * Setup initial simple bitstream configuration to be used by parser
+	 * task
+	 */
+	cmd_buf = (unsigned int *)ctrl_alloc;
+	result = translation_pvdec_adddma_transfers
+			(decpic_seg_list, &cmd_buf,
+			 (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+			 dec_pict, str_unit->eop);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	if ((unsigned long)(cmd_buf + (sizeof(struct ctrl_alloc_header) +
+		sizeof(struct vdec_ext_cmd)) / sizeof(unsigned int)) >=
+		ctrl_alloc_end)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/*
+	 * Setup regular control allocation message. Start with control
+	 * allocation header
+	 */
+	translation_pvdec_ctrl_setuphdr((struct ctrl_alloc_header *)cmd_buf, pict_cmds);
+	/* Advance past the control allocation header */
+	cmd_buf += sizeof(struct ctrl_alloc_header) / sizeof(unsigned int);
+
+	/* Reserve space for VDEC extension command and fill it */
+	vdec_ext = (struct vdec_ext_cmd *)cmd_buf;
+	cmd_buf += sizeof(struct vdec_ext_cmd) / sizeof(unsigned int);
+
+	result = translation_pvdecsetup_vdecext(vdec_ext, dec_pict, pict_cmds,
+						str_unit,
+						pstr_config_data->vid_std,
+						parser_mode);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	vdec_ext->hdr_size = hdr_size;
+
+	/* Add VLC tables to control allocation, skip when VC1 */
+	if (pstr_config_data->vid_std != VDEC_STD_VC1 &&
+	    dec_pict->vlc_idx_tables_bufinfo &&
+	    dec_pict->vlc_idx_tables_bufinfo->cpu_virt) {
+		unsigned short *vlc_idx_tables = (unsigned short *)
+			dec_pict->vlc_idx_tables_bufinfo->cpu_virt;
+		/*
+		 * Get count of elements in VLC idx table. Each element is made
+		 * of 3 IMG_UINT16, see e.g. mpeg2_idx.c
+		 */
+		unsigned int vlc_idx_count =
+			dec_pict->vlc_idx_tables_bufinfo->buf_size /
+			(3 * sizeof(unsigned short));
+
+		/* Add command to DMA VLC */
+		result = translation_pvdecsetup_vlcdma
+				(dec_pict->vlc_tables_bufinfo, &cmd_buf,
+				(ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int));
+
+		if (result != IMG_SUCCESS)
+			return result;
+
+		/* Add command to configure VLC tables */
+		result = translation_pvdecsetup_vlctables
+				((unsigned short (*)[3])vlc_idx_tables, vlc_idx_count, &cmd_buf,
+				 (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+				 regs_offset->vec_offset);
+
+		if (result != IMG_SUCCESS)
+			return result;
+	}
+
+	/* Setup commands for standards other than HEVC */
+	if (pstr_config_data->vid_std != VDEC_STD_HEVC) {
+		translation_pvdec_setup_commands
+				(pict_cmds, &cmd_buf,
+				 (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+				 regs_offset->vdmc_cmd_offset);
+	}
+
+	/* Setup commands for HEVC */
+	vdec_ext->mem_to_reg_size = 0;
+
+#ifdef HAS_HEVC
+	if (pstr_config_data->vid_std == VDEC_STD_HEVC) {
+		result = translation_pvdec_setup_pvdec_commands
+				(picture, dec_pict, str_unit,
+				 regs_offset, &cmd_buf,
+				 (ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+				 &memto_reg_host_part, pict_cmds);
+		if (result != IMG_SUCCESS) {
+			pr_err("Failed to setup VDMC & VDEB firmware commands.\n");
+			return result;
+		}
+
+		/* Set size of MemToReg buffer in VDEC extension command */
+		VDEC_ASSERT(MEM_TO_REG_BUF_SIZE <
+			(MEM2REG_SIZE_BUF_TOTAL_MASK >> MEM2REG_SIZE_BUF_TOTAL_SHIFT));
+		VDEC_ASSERT(memto_reg_host_part <
+			(MEM2REG_SIZE_HOST_PART_MASK >> MEM2REG_SIZE_HOST_PART_SHIFT));
+
+		vdec_ext->mem_to_reg_size = (MEM_TO_REG_BUF_SIZE << MEM2REG_SIZE_BUF_TOTAL_SHIFT) |
+			(memto_reg_host_part << MEM2REG_SIZE_HOST_PART_SHIFT);
+
+		dec_pict->genc_id = picture->pict_res_int->seq_resint->genc_buf_id;
+		dec_pict->genc_bufs = picture->pict_res_int->seq_resint->genc_buffers;
+	}
+#endif
+	/* Finally mark end of commands */
+	*(cmd_buf++) = CMD_COMPLETION;
+
+	/* Print message for debugging */
+	{
+		int i;
+
+		for (i = 0; i < ((unsigned long)cmd_buf - ctrl_alloc) / sizeof(unsigned int); i++)
+			pr_debug("ctrl_alloc_buf[%d] == %08x\n", i,
+				 ((unsigned int *)ctrl_alloc)[i]);
+	}
+	/* Transfer control allocation command to device memory */
+	dec_pict->ctrl_alloc_bytes = ((unsigned long)cmd_buf - ctrl_alloc);
+	dec_pict->ctrl_alloc_offset = dec_pict->ctrl_alloc_bytes;
+	dec_pict->operating_op = pict_cmds[VDECFW_CMD_OPERATING_MODE];
+
+	/*
+	 * NOTE : Nothing related to tiling will be used.
+	 * result = translation_ConfigureTiling(psStrUnit, psDecPict,
+	 * psCoreProps);
+	 */
+
+	return result;
+}
+
+int translation_fragment_prepare(struct dec_decpict *dec_pict,
+				 struct lst_t *decpic_seg_list, int eop,
+				 struct dec_pict_fragment *pict_fragement)
+{
+	int result;
+	unsigned int *cmd_buf;
+	struct vidio_ddbufinfo *batchmsg_bufinfo;
+	unsigned long ctrl_alloc;
+	unsigned long ctrl_alloc_end;
+
+	if (!dec_pict || !dec_pict->batch_msginfo ||
+	    !decpic_seg_list || !pict_fragement)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	batchmsg_bufinfo = dec_pict->batch_msginfo->ddbuf_info;
+
+	ctrl_alloc = (unsigned long)batchmsg_bufinfo->cpu_virt +
+		dec_pict->ctrl_alloc_offset;
+	ctrl_alloc_end = (unsigned long)batchmsg_bufinfo->cpu_virt +
+		batchmsg_bufinfo->buf_size;
+
+	/*
+	 * Setup initial simple bitstream configuration to be used by parser
+	 * task
+	 */
+	cmd_buf = (unsigned int *)ctrl_alloc;
+	result = translation_pvdec_adddma_transfers
+			(decpic_seg_list, &cmd_buf,
+			(ctrl_alloc_end - (unsigned long)cmd_buf) / sizeof(unsigned int),
+			dec_pict, eop);
+
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Finally mark end of commands */
+	*(cmd_buf++) = CMD_COMPLETION;
+
+	/* Transfer control allocation command to device memory */
+	pict_fragement->ctrl_alloc_offset = dec_pict->ctrl_alloc_offset;
+	pict_fragement->ctrl_alloc_bytes =
+		((unsigned long)cmd_buf - ctrl_alloc);
+
+	dec_pict->ctrl_alloc_offset += pict_fragement->ctrl_alloc_bytes;
+
+	return result;
+}
+#endif /* VDEC_USE_PVDEC */
diff --git a/drivers/staging/media/vxd/decoder/translation_api.h b/drivers/staging/media/vxd/decoder/translation_api.h
new file mode 100644
index 000000000000..43c570760d57
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/translation_api.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VDECDD translation APIs.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef __TRANSLATION_API_H__
+#define __TRANSLATION_API_H__
+
+#include "decoder.h"
+#include "hw_control.h"
+#include "vdecdd_defs.h"
+#include "vdec_defs.h"
+#include "vxd_props.h"
+
+/*
+ * This function submits a stream unit for translation
+ * into a control allocation buffer used in PVDEC operation.
+ */
+int translation_ctrl_alloc_prepare
+	(struct vdec_str_configdata *psstr_config_data,
+	struct vdecdd_str_unit *psstrunit,
+	struct dec_decpict *psdecpict,
+	const struct vxd_coreprops *core_props,
+	struct decoder_regsoffsets *regs_offset);
+
+/*
+ * This function submits a picture fragment for translation into a control
+ * allocation buffer used in PVDEC operation.
+ */
+int translation_fragment_prepare(struct dec_decpict *psdecpict,
+				 struct lst_t *decpic_seg_list, int eop,
+				 struct dec_pict_fragment *pict_fragement);
+
+#endif /* __TRANSLATION_API_H__ */
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 09/30] v4l: vxd-dec: Add idgen api modules
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (7 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 08/30] v4l: vxd-dec: Add translation control modules sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-24 14:00   ` Dan Carpenter
  2021-08-18 14:10 ` [PATCH 10/30] v4l: vxd-dec: Add utility modules sidraya.bj
                   ` (22 subsequent siblings)
  31 siblings, 1 reply; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This patch creates and destroys the context for idgen. It returns IDs
and, based on those IDs, manages the associated resources.
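
A minimal usage sketch (illustrative only; my_obj is a placeholder and
error handling is elided):

  void *idgen, *handle;
  unsigned int id;

  /* up to 64 IDs, handle blocks of 16, non-incrementing mode */
  idgen_createcontext(64, 16, 0, &idgen);
  idgen_allocid(idgen, my_obj, &id);    /* associate my_obj with an ID */
  idgen_gethandle(idgen, id, &handle);  /* handle == my_obj */
  idgen_freeid(idgen, id);
  idgen_destroycontext(idgen);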

Signed-off-by: Amit Makani <amit.makani@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                  |   2 +
 drivers/staging/media/vxd/common/idgen_api.c | 449 +++++++++++++++++++
 drivers/staging/media/vxd/common/idgen_api.h |  59 +++
 3 files changed, 510 insertions(+)
 create mode 100644 drivers/staging/media/vxd/common/idgen_api.c
 create mode 100644 drivers/staging/media/vxd/common/idgen_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 538faa644d13..0468aaac3b7d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19537,6 +19537,8 @@ M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+F:	drivers/staging/media/vxd/common/idgen_api.c
+F:	drivers/staging/media/vxd/common/idgen_api.h
 F:	drivers/staging/media/vxd/common/img_mem_man.c
 F:	drivers/staging/media/vxd/common/img_mem_man.h
 F:	drivers/staging/media/vxd/common/img_mem_unified.c
diff --git a/drivers/staging/media/vxd/common/idgen_api.c b/drivers/staging/media/vxd/common/idgen_api.c
new file mode 100644
index 000000000000..abc8660d7a4a
--- /dev/null
+++ b/drivers/staging/media/vxd/common/idgen_api.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ID generation manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "idgen_api.h"
+#include "lst.h"
+
+/*
+ * This structure contains the ID generation context.
+ */
+struct idgen_context {
+	/* List of handle block structures */
+	struct lst_t hdlblklst;
+	/* Max ID - set by IDGEN_CreateContext(). */
+	unsigned int maxid;
+	/*
+	 * The number of handle per block. In case of
+	 * incrementing ids, size of the Hash table.
+	 */
+	unsigned int blksize;
+	/* Next free slot. */
+	unsigned int freeslot;
+	/* Max slot+1 for which we have allocated blocks. */
+	unsigned int maxslotplus1;
+	/* Incrementing IDs */
+	/* Set when the API needs to return incrementing IDs */
+	int incids;
+	/* Latest ID given back */
+	unsigned int latestincnumb;
+	/* Array of list to hold IDGEN_sHdlId */
+	struct lst_t *incidlist;
+};
+
+/*
+ * Internal representation of an incrementing ID.
+ */
+struct idgen_id {
+	void **link; /* to be part of singly linked list */
+	/* Incrementing ID returned */
+	unsigned int incid;
+	void *hid;
+};
+
+/*
+ * This structure represents a block of handles.
+ */
+struct idgen_hdblk {
+	void **link; /* to be part of singly linked list */
+	/* Array of handles in this block. */
+	void *ahhandles[1];
+};
+
+/*
+ * A hashing function could go here. Currently just makes a circular list of
+ * max number of concurrent Ids (idgen_context->blksize) in the system.
+ */
+static unsigned int idgen_func(struct idgen_context *idcontext, unsigned int id)
+{
+	return ((id - 1) % idcontext->blksize);
+}
+
+int idgen_createcontext(unsigned int maxid, unsigned int blksize,
+			int incid, void **idgenhandle)
+{
+	struct idgen_context *idcontext;
+
+	/* Create context structure */
+	idcontext = kzalloc(sizeof(*idcontext), GFP_KERNEL);
+	if (!idcontext)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	/* Initialise the context */
+	lst_init(&idcontext->hdlblklst);
+	idcontext->maxid   = maxid;
+	idcontext->blksize = blksize;
+
+	/* If we need incrementing Ids */
+	idcontext->incids = incid;
+	idcontext->latestincnumb = 0;
+	idcontext->incidlist  = NULL;
+	if (idcontext->incids) {
+		unsigned int i = 0;
+		/* Initialise the hash table of lists of length blksize */
+		idcontext->incidlist = kzalloc((sizeof(*idcontext->incidlist) *
+				idcontext->blksize), GFP_KERNEL);
+		if (!idcontext->incidlist) {
+			kfree(idcontext);
+			return IMG_ERROR_OUT_OF_MEMORY;
+		}
+
+		/* Initialise all the lists in the hash table */
+		for (i = 0; i < idcontext->blksize; i++)
+			lst_init(&idcontext->incidlist[i]);
+	}
+
+	/* Return context structure as handle */
+	*idgenhandle = idcontext;
+
+	return IMG_SUCCESS;
+}
+
+int idgen_destroycontext(void *idgenhandle)
+{
+	struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+	struct idgen_hdblk *hdblk;
+
+	if (!idcontext)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* If incrementing Ids, free the List of Incrementing Ids */
+	if (idcontext->incids) {
+		struct idgen_id *id;
+		unsigned int i = 0;
+
+		for (i = 0; i < idcontext->blksize; i++) {
+			id = lst_removehead(&idcontext->incidlist[i]);
+			while (id) {
+				kfree(id);
+				id = lst_removehead(&idcontext->incidlist[i]);
+			}
+		}
+		kfree(idcontext->incidlist);
+	}
+
+	/* Remove and free all handle blocks */
+	hdblk = (struct idgen_hdblk *)lst_removehead(&idcontext->hdlblklst);
+	while (hdblk) {
+		kfree(hdblk);
+		hdblk = (struct idgen_hdblk *)
+				lst_removehead(&idcontext->hdlblklst);
+	}
+
+	/* Free context structure */
+	kfree(idcontext);
+
+	return IMG_SUCCESS;
+}
+
+static int idgen_findnextfreeslot(void *idgenhandle, unsigned int prevfreeslot)
+{
+	struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+	struct idgen_hdblk *hdblk;
+	unsigned int freslotblk;
+	unsigned int freeslot;
+
+	if (!idcontext)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Find the block containing the current free slot */
+	freeslot = prevfreeslot;
+	freslotblk = prevfreeslot;
+	hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+	if (!hdblk)
+		return IMG_ERROR_FATAL;
+
+	while (freslotblk >= idcontext->blksize) {
+		freslotblk -= idcontext->blksize;
+		hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+	}
+
+	/* Locate the next free slot */
+	while (hdblk) {
+		while (freslotblk < idcontext->blksize) {
+			if (!hdblk->ahhandles[freslotblk]) {
+				/* Found */
+				idcontext->freeslot = freeslot;
+				return IMG_SUCCESS;
+			}
+			freeslot++;
+			freslotblk++;
+		}
+		freslotblk = 0;
+		hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+	}
+
+	/* Beyond the last block */
+	idcontext->freeslot = freeslot;
+	return IMG_SUCCESS;
+}
+
+/*
+ * This function returns the idgen_id structure matching the given
+ * incrementing ID.
+ */
+static struct idgen_id *idgen_getid(struct lst_t *idlist, unsigned int id)
+{
+	struct idgen_id *idstruct;
+
+	idstruct = lst_first(idlist);
+	while (idstruct) {
+		if (idstruct->incid == id)
+			break;
+
+		idstruct = lst_next(idstruct);
+	}
+	return idstruct;
+}
+
+/*
+ * This function does IDGEN allocation.
+ */
+int idgen_allocid(void *idgenhandle, void *handle, unsigned int *id)
+{
+	struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+	struct idgen_hdblk *hdblk;
+	unsigned int size = 0;
+	unsigned int freeslot = 0;
+	unsigned int result = 0;
+
+	if (!idcontext || !handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!idcontext->incids) {
+		/* If the free slot is >= to the max id */
+		if (idcontext->freeslot >= idcontext->maxid) {
+			result = IMG_ERROR_INVALID_ID;
+			goto error;
+		}
+
+		/* If all of the allocated Ids have been used */
+		if (idcontext->freeslot >= idcontext->maxslotplus1) {
+			/* Allocate a stream context */
+			size = sizeof(*hdblk) + (sizeof(void *) *
+				(idcontext->blksize - 1));
+			hdblk = kzalloc(size, GFP_KERNEL);
+			if (!hdblk) {
+				result = IMG_ERROR_OUT_OF_MEMORY;
+				goto error;
+			}
+
+			lst_add(&idcontext->hdlblklst, hdblk);
+			idcontext->maxslotplus1 += idcontext->blksize;
+		}
+
+		/* Find the block containing the next free slot */
+		freeslot = idcontext->freeslot;
+		hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+		if (!hdblk) {
+			result = IMG_ERROR_FATAL;
+			goto error;
+		}
+		while (freeslot >= idcontext->blksize) {
+			freeslot -= idcontext->blksize;
+			hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+			if (!hdblk) {
+				result = IMG_ERROR_FATAL;
+				goto error;
+			}
+		}
+
+		/* Put handle in the next free slot */
+		hdblk->ahhandles[freeslot] = handle;
+
+		*id = idcontext->freeslot + 1;
+
+		/* Find a new free slot */
+		result = idgen_findnextfreeslot(idcontext, idcontext->freeslot);
+		if (result != 0)
+			goto error;
+	/*
+	 * If incrementing IDs, just add the ID node to the correct hash table
+	 * list.
+	 */
+	} else {
+		struct idgen_id *psid = NULL;
+		unsigned int currentincnum, funcid;
+		/*
+		 * If incrementing IDs, increment the id for returning back,and
+		 * save the ID node in the list of ids, indexed by hash function
+		 * (idgen_func). We might want to use a better hashing function
+		 */
+		currentincnum = (idcontext->latestincnumb + 1) %
+				idcontext->maxid;
+
+		/* Increment the id. Wraps if greater than Max Id */
+		if (currentincnum == 0)
+			currentincnum++;
+
+		idcontext->latestincnumb = currentincnum;
+
+		result = IMG_ERROR_INVALID_ID;
+		do {
+			/* Add to list in the correct hash table entry */
+			funcid = idgen_func(idcontext, idcontext->latestincnumb);
+			if (idgen_getid(&idcontext->incidlist[funcid],
+					idcontext->latestincnumb) == NULL) {
+				psid = kmalloc(sizeof(*psid), GFP_KERNEL);
+				if (!psid) {
+					result = IMG_ERROR_OUT_OF_MEMORY;
+					goto error;
+				}
+
+				psid->incid = idcontext->latestincnumb;
+				psid->hid = handle;
+
+				funcid = idgen_func(idcontext,
+						    idcontext->latestincnumb);
+				lst_add(&idcontext->incidlist[funcid],
+					psid);
+
+				result = IMG_SUCCESS;
+			} else {
+				idcontext->latestincnumb =
+					(idcontext->latestincnumb + 1) %
+						idcontext->maxid;
+				if (idcontext->latestincnumb == 0) {
+					/* Do not want to have zero as pic id */
+					idcontext->latestincnumb++;
+				}
+				/*
+				 * We have reached a point where we have wrapped
+				 * allowed Ids (MaxId) and we want to overwrite
+				 * ID still not released
+				 */
+				if (idcontext->latestincnumb == currentincnum)
+					goto error;
+			}
+		} while (result != IMG_SUCCESS);
+
+		*id = psid->incid;
+	}
+	return IMG_SUCCESS;
+error:
+	return result;
+}
+
+int idgen_freeid(void *idgenhandle, unsigned int id)
+{
+	struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+	struct idgen_hdblk *hdblk;
+	unsigned int origslot;
+	unsigned int slot;
+
+	if (idcontext->incids) {
+		/*
+		 * Find the slot in the correct hash table entry, and
+		 * remove the ID.
+		 */
+		struct idgen_id *psid;
+
+		psid = idgen_getid(&idcontext->incidlist
+				[idgen_func(idcontext, id)], id);
+		if (psid) {
+			lst_remove(&idcontext->incidlist
+					[idgen_func(idcontext, id)], psid);
+			kfree(psid);
+		} else {
+			return IMG_ERROR_INVALID_ID;
+		}
+	} else {
+		/* If not incrementing id */
+		slot = id - 1;
+		origslot = slot;
+
+		if (slot >= idcontext->maxslotplus1)
+			return IMG_ERROR_INVALID_ID;
+
+		/* Find the block containing the id */
+		hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+		if (!hdblk)
+			return IMG_ERROR_FATAL;
+
+		while (slot >= idcontext->blksize) {
+			slot -= idcontext->blksize;
+			hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+			if (!hdblk)
+				return IMG_ERROR_FATAL;
+		}
+
+		/* Slot should be occupied */
+		if (!hdblk->ahhandles[slot])
+			return IMG_ERROR_INVALID_ID;
+
+		/* Free slot */
+		hdblk->ahhandles[slot] = NULL;
+
+		/* If this slot is before the previous free slot */
+		if (origslot < idcontext->freeslot)
+			idcontext->freeslot = origslot;
+	}
+	return IMG_SUCCESS;
+}
+
+int idgen_gethandle(void *idgenhandle, unsigned int id, void **handle)
+{
+	struct idgen_context *idcontext = (struct idgen_context *)idgenhandle;
+	struct idgen_hdblk *hdblk;
+	unsigned int slot;
+
+	if (!idcontext)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (idcontext->incids) {
+		/*
+		 * Find the slot in the correct hash table entry, and return
+		 * the handles.
+		 */
+		struct idgen_id *psid;
+
+		psid = idgen_getid(&idcontext->incidlist
+				[idgen_func(idcontext, id)], id);
+		if (psid)
+			*handle = psid->hid;
+
+		else
+			return IMG_ERROR_INVALID_ID;
+	} else {
+		/* If not incrementing IDs */
+		slot = id - 1;
+		if (slot >= idcontext->maxslotplus1)
+			return IMG_ERROR_INVALID_ID;
+
+		/* Find the block containing the id */
+		hdblk = (struct idgen_hdblk *)lst_first(&idcontext->hdlblklst);
+		if (!hdblk)
+			return IMG_ERROR_INVALID_PARAMETERS;
+
+		while (slot >= idcontext->blksize) {
+			slot -= idcontext->blksize;
+			hdblk = (struct idgen_hdblk *)lst_next(hdblk);
+			if (!hdblk)
+				return IMG_ERROR_INVALID_PARAMETERS;
+		}
+
+		/* Slot should be occupied */
+		if (!hdblk->ahhandles[slot])
+			return IMG_ERROR_INVALID_ID;
+
+		/* Return the handle */
+		*handle = hdblk->ahhandles[slot];
+	}
+
+	return IMG_SUCCESS;
+}
diff --git a/drivers/staging/media/vxd/common/idgen_api.h b/drivers/staging/media/vxd/common/idgen_api.h
new file mode 100644
index 000000000000..6c894343f1fb
--- /dev/null
+++ b/drivers/staging/media/vxd/common/idgen_api.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ID generation manager API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+#ifndef __IDGENAPI_H__
+#define __IDGENAPI_H__
+
+#include <linux/types.h>
+
+#include "img_errors.h"
+
+/*
+ * This function is used to create Id generation context.
+ * NOTE: Should only be called once to setup the context structure.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherence.
+ */
+int idgen_createcontext(unsigned int maxid, unsigned int blksize,
+			int incid, void **idgenhandle);
+
+/*
+ * This function is used to destroy an Id generation context.  This function
+ * discards any handle blocks associated with the context.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherence.
+ */
+int idgen_destroycontext(void *idgenhandle);
+
+/*
+ * This function is used to associate a handle with an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_allocid(void *idgenhandle, void *handle, unsigned int *id);
+
+/*
+ * This function is used to free an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_freeid(void *idgenhandle, unsigned int id);
+
+/*
+ * This function is used to get the handle associated with an Id.
+ * NOTE: The client is responsible for providing thread/process safe locks on
+ * the context structure to maintain coherency.
+ */
+int idgen_gethandle(void *idgenhandle, unsigned int id, void **handle);
+#endif /* __IDGENAPI_H__ */
-- 
2.17.1



^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 10/30] v4l: vxd-dec: Add utility modules
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (8 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 09/30] v4l: vxd-dec: Add idgen api modules sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 11/30] v4l: vxd-dec: Add TALMMU module sidraya.bj
                   ` (21 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

Contains utility modules for doubly linked queues, singly linked
lists and work queues.
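
A minimal sketch of the intrusive list usage (illustrative only; struct
my_item is a placeholder):

  struct my_item {
          void **link;   /* must be first: lst stores the next pointer here */
          int payload;
  };

  struct lst_t list;
  struct my_item item;

  lst_init(&list);
  lst_add(&list, &item);                     /* append at the tail */
  struct my_item *first = lst_removehead(&list);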

Signed-off-by: Lakshmi Sankar <lakshmisankar-t@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |   6 +
 drivers/staging/media/vxd/common/dq.c         | 248 ++++++++++++++++++
 drivers/staging/media/vxd/common/dq.h         |  36 +++
 drivers/staging/media/vxd/common/lst.c        | 119 +++++++++
 drivers/staging/media/vxd/common/lst.h        |  37 +++
 drivers/staging/media/vxd/common/work_queue.c | 188 +++++++++++++
 drivers/staging/media/vxd/common/work_queue.h |  66 +++++
 7 files changed, 700 insertions(+)
 create mode 100644 drivers/staging/media/vxd/common/dq.c
 create mode 100644 drivers/staging/media/vxd/common/dq.h
 create mode 100644 drivers/staging/media/vxd/common/lst.c
 create mode 100644 drivers/staging/media/vxd/common/lst.h
 create mode 100644 drivers/staging/media/vxd/common/work_queue.c
 create mode 100644 drivers/staging/media/vxd/common/work_queue.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 0468aaac3b7d..2668eeb89a34 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19537,6 +19537,8 @@ M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+F:	drivers/staging/media/vxd/common/dq.c
+F:	drivers/staging/media/vxd/common/dq.h
 F:	drivers/staging/media/vxd/common/idgen_api.c
 F:	drivers/staging/media/vxd/common/idgen_api.h
 F:	drivers/staging/media/vxd/common/img_mem_man.c
@@ -19544,6 +19546,10 @@ F:	drivers/staging/media/vxd/common/img_mem_man.h
 F:	drivers/staging/media/vxd/common/img_mem_unified.c
 F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
+F:	drivers/staging/media/vxd/common/lst.c
+F:	drivers/staging/media/vxd/common/lst.h
+F:	drivers/staging/media/vxd/common/work_queue.c
+F:	drivers/staging/media/vxd/common/work_queue.h
 F:	drivers/staging/media/vxd/decoder/hw_control.c
 F:	drivers/staging/media/vxd/decoder/hw_control.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
diff --git a/drivers/staging/media/vxd/common/dq.c b/drivers/staging/media/vxd/common/dq.c
new file mode 100644
index 000000000000..890be5ed00e7
--- /dev/null
+++ b/drivers/staging/media/vxd/common/dq.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Utility module for doubly linked queues.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "dq.h"
+#include "img_errors.h"
+
+void dq_init(struct dq_linkage_t *queue)
+{
+	queue->fwd = (struct dq_linkage_t *)queue;
+	queue->back = (struct dq_linkage_t *)queue;
+}
+
+void dq_addhead(struct dq_linkage_t *queue, void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return;
+
+	((struct dq_linkage_t *)item)->back = (struct dq_linkage_t *)queue;
+	((struct dq_linkage_t *)item)->fwd =
+					((struct dq_linkage_t *)queue)->fwd;
+	((struct dq_linkage_t *)queue)->fwd->back = (struct dq_linkage_t *)item;
+	((struct dq_linkage_t *)queue)->fwd = (struct dq_linkage_t *)item;
+}
+
+void dq_addtail(struct dq_linkage_t *queue, void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return;
+
+	((struct dq_linkage_t *)item)->fwd = (struct dq_linkage_t *)queue;
+	((struct dq_linkage_t *)item)->back =
+					((struct dq_linkage_t *)queue)->back;
+	((struct dq_linkage_t *)queue)->back->fwd = (struct dq_linkage_t *)item;
+	((struct dq_linkage_t *)queue)->back = (struct dq_linkage_t *)item;
+}
+
+int dq_empty(struct dq_linkage_t *queue)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return 1;
+
+	return ((queue)->fwd == (struct dq_linkage_t *)(queue));
+}
+
+void *dq_first(struct dq_linkage_t *queue)
+{
+	struct dq_linkage_t *temp = queue->fwd;
+
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return NULL;
+
+	return temp == (struct dq_linkage_t *)queue ? NULL : temp;
+}
+
+void *dq_last(struct dq_linkage_t *queue)
+{
+	struct dq_linkage_t *temp = queue->back;
+
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return NULL;
+
+	return temp == (struct dq_linkage_t *)queue ? NULL : temp;
+}
+
+void *dq_next(void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+	if (!((struct dq_linkage_t *)item)->back ||
+	    !((struct dq_linkage_t *)item)->fwd)
+		return NULL;
+
+	return ((struct dq_linkage_t *)item)->fwd;
+}
+
+void *dq_previous(void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+	if (!((struct dq_linkage_t *)item)->back ||
+	    !((struct dq_linkage_t *)item)->fwd)
+		return NULL;
+
+	return ((struct dq_linkage_t *)item)->back;
+}
+
+void dq_remove(void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)item)->fwd);
+
+	if (!((struct dq_linkage_t *)item)->back ||
+	    !((struct dq_linkage_t *)item)->fwd)
+		return;
+
+	((struct dq_linkage_t *)item)->fwd->back =
+					((struct dq_linkage_t *)item)->back;
+	((struct dq_linkage_t *)item)->back->fwd =
+					((struct dq_linkage_t *)item)->fwd;
+
+	/* make item linkages safe for "orphan" removes */
+	((struct dq_linkage_t *)item)->fwd = item;
+	((struct dq_linkage_t *)item)->back = item;
+}
+
+void *dq_removehead(struct dq_linkage_t *queue)
+{
+	struct dq_linkage_t *temp;
+
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return NULL;
+
+	if ((queue)->fwd == (struct dq_linkage_t *)(queue))
+		return NULL;
+
+	temp = ((struct dq_linkage_t *)queue)->fwd;
+	temp->fwd->back = temp->back;
+	temp->back->fwd = temp->fwd;
+
+	/* make item linkages safe for "orphan" removes */
+	temp->fwd = temp;
+	temp->back = temp;
+	return temp;
+}
+
+void *dq_removetail(struct dq_linkage_t *queue)
+{
+	struct dq_linkage_t *temp;
+
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)queue)->fwd);
+
+	if (!((struct dq_linkage_t *)queue)->back ||
+	    !((struct dq_linkage_t *)queue)->fwd)
+		return NULL;
+
+	if ((queue)->fwd == (struct dq_linkage_t *)(queue))
+		return NULL;
+
+	temp = ((struct dq_linkage_t *)queue)->back;
+	temp->fwd->back = temp->back;
+	temp->back->fwd = temp->fwd;
+
+	/* make item linkages safe for "orphan" removes */
+	temp->fwd = temp;
+	temp->back = temp;
+
+	return temp;
+}
+
+void dq_addbefore(void *successor, void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)successor)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)successor)->fwd);
+
+	if (!((struct dq_linkage_t *)successor)->back ||
+	    !((struct dq_linkage_t *)successor)->fwd)
+		return;
+
+	((struct dq_linkage_t *)item)->fwd = (struct dq_linkage_t *)successor;
+	((struct dq_linkage_t *)item)->back =
+				((struct dq_linkage_t *)successor)->back;
+	((struct dq_linkage_t *)item)->back->fwd = (struct dq_linkage_t *)item;
+	((struct dq_linkage_t *)successor)->back = (struct dq_linkage_t *)item;
+}
+
+void dq_addafter(void *predecessor, void *item)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)predecessor)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)predecessor)->fwd);
+
+	if (!((struct dq_linkage_t *)predecessor)->back ||
+	    !((struct dq_linkage_t *)predecessor)->fwd)
+		return;
+
+	((struct dq_linkage_t *)item)->fwd =
+				((struct dq_linkage_t *)predecessor)->fwd;
+	((struct dq_linkage_t *)item)->back =
+					(struct dq_linkage_t *)predecessor;
+	((struct dq_linkage_t *)item)->fwd->back = (struct dq_linkage_t *)item;
+	((struct dq_linkage_t *)predecessor)->fwd = (struct dq_linkage_t *)item;
+}
+
+void dq_move(struct dq_linkage_t *from, struct dq_linkage_t *to)
+{
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)from)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)from)->fwd);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)to)->back);
+	IMG_DBG_ASSERT(((struct dq_linkage_t *)to)->fwd);
+
+	if (!((struct dq_linkage_t *)from)->back ||
+	    !((struct dq_linkage_t *)from)->fwd ||
+	    !((struct dq_linkage_t *)to)->back ||
+	    !((struct dq_linkage_t *)to)->fwd)
+		return;
+
+	if ((from)->fwd == (struct dq_linkage_t *)(from)) {
+		dq_init(to);
+	} else {
+		*to = *from;
+		to->fwd->back = (struct dq_linkage_t *)to;
+		to->back->fwd = (struct dq_linkage_t *)to;
+		dq_init(from);
+	}
+}
diff --git a/drivers/staging/media/vxd/common/dq.h b/drivers/staging/media/vxd/common/dq.h
new file mode 100644
index 000000000000..4663a92aaf7a
--- /dev/null
+++ b/drivers/staging/media/vxd/common/dq.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Utility module for doubly linked queues.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+#ifndef DQ_H
+#define DQ_H
+
+/* dq structure */
+struct dq_linkage_t {
+	struct dq_linkage_t *fwd;
+	struct dq_linkage_t *back;
+};
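+
+/*
+ * Items placed on a queue must embed struct dq_linkage_t as their first
+ * member. The queue head is itself a dq_linkage_t, so the list is circular
+ * with the head acting as a sentinel.
+ */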
+
+/* Function Prototypes */
+void dq_addafter(void *predecessor, void *item);
+void dq_addbefore(void *successor, void *item);
+void dq_addhead(struct dq_linkage_t *queue, void *item);
+void dq_addtail(struct dq_linkage_t *queue, void *item);
+int dq_empty(struct dq_linkage_t *queue);
+void *dq_first(struct dq_linkage_t *queue);
+void *dq_last(struct dq_linkage_t *queue);
+void dq_init(struct dq_linkage_t *queue);
+void dq_move(struct dq_linkage_t *from, struct dq_linkage_t *to);
+void *dq_next(void *item);
+void *dq_previous(void *item);
+void dq_remove(void *item);
+void *dq_removehead(struct dq_linkage_t *queue);
+void *dq_removetail(struct dq_linkage_t *queue);
+
+#endif /* DQ_H */
diff --git a/drivers/staging/media/vxd/common/lst.c b/drivers/staging/media/vxd/common/lst.c
new file mode 100644
index 000000000000..bb047ab6d598
--- /dev/null
+++ b/drivers/staging/media/vxd/common/lst.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * List processing primitives.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+
+#include "lst.h"
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+void lst_add(struct lst_t *list, void *item)
+{
+	if (!list->first) {
+		list->first = item;
+		list->last = item;
+	} else {
+		*list->last = item;
+		list->last = item;
+	}
+	*((void **)item) = NULL;
+}
+
+void lst_addhead(struct lst_t *list, void *item)
+{
+	if (!list->first) {
+		list->first = item;
+		list->last = item;
+		*((void **)item) = NULL;
+	} else {
+		*((void **)item) = list->first;
+		list->first = item;
+	}
+}
+
+int lst_empty(struct lst_t *list)
+{
+	if (!list->first)
+		return 1;
+	else
+		return 0;
+}
+
+void *lst_first(struct lst_t *list)
+{
+	return list->first;
+}
+
+void lst_init(struct lst_t *list)
+{
+	list->first = NULL;
+	list->last = NULL;
+}
+
+void *lst_last(struct lst_t *list)
+{
+	return list->last;
+}
+
+void *lst_next(void *item)
+{
+	return *((void **)item);
+}
+
+void *lst_removehead(struct lst_t *list)
+{
+	void **temp = list->first;
+
+	if (temp) {
+		list->first = *temp;
+		if (!list->first)
+			list->last = NULL;
+	}
+	return temp;
+}
+
+void *lst_remove(struct lst_t *list, void *item)
+{
+	void **p;
+	void **q;
+
+	p = (void **)list;
+	q = *p;
+	while (q) {
+		if (q == item) {
+			*p = *q;
+			if (list->last == q)
+				list->last = p;
+			return item;
+		}
+		p = q;
+		q = *p;
+	}
+
+	return NULL;
+}
+
+int lst_check(struct lst_t *list, void *item)
+{
+	void **p;
+	void **q;
+
+	p = (void **)list;
+	q = *p;
+	while (q) {
+		if (q == item)
+			return 1;
+		p = q;
+		q = *p;
+	}
+
+	return 0;
+}
diff --git a/drivers/staging/media/vxd/common/lst.h b/drivers/staging/media/vxd/common/lst.h
new file mode 100644
index 000000000000..ccf6eed19019
--- /dev/null
+++ b/drivers/staging/media/vxd/common/lst.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * List processing primitives.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ */
+#ifndef __LIST_H__
+#define __LIST_H__
+
+#include <linux/types.h>
+
+struct lst_t {
+	void **first;
+	void **last;
+};
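+
+/*
+ * Items stored on an lst_t are assumed to reserve a void * link as
+ * their first member (the list code writes the next pointer through
+ * it); e.g. a hypothetical element:
+ *
+ *	struct my_item {
+ *		void *link;
+ *		unsigned int payload;
+ *	};
+ */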
+
+void lst_add(struct lst_t *list, void *item);
+void lst_addhead(struct lst_t *list, void *item);
+
+/**
+ * lst_empty - check whether the list is empty
+ * @list: pointer to the list
+ */
+int  lst_empty(struct lst_t *list);
+void *lst_first(struct lst_t *list);
+void lst_init(struct lst_t *list);
+void *lst_last(struct lst_t *list);
+void *lst_next(void *item);
+void *lst_remove(struct lst_t *list, void *item);
+void *lst_removehead(struct lst_t *list);
+int lst_check(struct lst_t *list, void *item);
+
+#endif /* __LIST_H__ */
diff --git a/drivers/staging/media/vxd/common/work_queue.c b/drivers/staging/media/vxd/common/work_queue.c
new file mode 100644
index 000000000000..6bd91a7fdbf4
--- /dev/null
+++ b/drivers/staging/media/vxd/common/work_queue.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Work Queue Handling for Linux
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+
+#include "work_queue.h"
+
+/* Define and initialise the mutex used to guard the work lists. */
+DEFINE_MUTEX(mutex);
+
+struct node {
+	void **key;
+	struct node *next;
+};
+
+struct node *work_head;
+struct node *delayed_work_head;
+
+void init_work(void **work_args, void *work_fn, uint8_t hwa_id)
+{
+	struct work_struct **work = (struct work_struct **)work_args;
+	//create a link
+	struct node *link = kmalloc(sizeof(*link), GFP_KERNEL);
+
+	if (!link)
+		return;
+
+	*work = kzalloc(sizeof(**work), GFP_KERNEL);
+	if (!(*work)) {
+		pr_err("Memory allocation failed for work_queue\n");
+		kfree(link);
+		return;
+	}
+	INIT_WORK(*work, work_fn);
+
+	link->key = (void **)work;
+	mutex_lock(&mutex);
+	//point it to old first node
+	link->next = work_head;
+
+	//point first to new first node
+	work_head = link;
+	mutex_unlock(&mutex);
+}
+
+void init_delayed_work(void **work_args, void *work_fn, uint8_t hwa_id)
+{
+	struct delayed_work **work = (struct delayed_work **)work_args;
+	//create a link
+	struct node *link = kmalloc(sizeof(*link), GFP_KERNEL);
+
+	if (!link)
+		return;
+
+	*work = kzalloc(sizeof(**work), GFP_KERNEL);
+	if (!(*work)) {
+		pr_err("Memory allocation failed for delayed_work_queue\n");
+		kfree(link);
+		return;
+	}
+	INIT_DELAYED_WORK(*work, work_fn);
+
+	link->key = (void **)work;
+	mutex_lock(&mutex);
+	//point it to old first node
+	link->next = delayed_work_head;
+
+	//point first to new first node
+	delayed_work_head = link;
+	mutex_unlock(&mutex);
+}
+
+/**
+ * get_work_buff - find the list node matching a work_struct
+ * @key: pointer to the work_struct to search for
+ * @flag: if non-zero, unlink and free the matching node
+ *
+ * Return: address of the stored work pointer, or NULL if not found.
+ */
+void *get_work_buff(void *key, signed char flag)
+{
+	struct node *data = NULL;
+	void *work_new = NULL;
+	struct node *temp = NULL;
+	struct node *previous = NULL;
+	struct work_struct **work = NULL;
+
+	//start from the first link
+	mutex_lock(&mutex);
+	temp = work_head;
+
+	//if list is empty
+	if (!work_head) {
+		mutex_unlock(&mutex);
+		return NULL;
+	}
+
+	work = ((struct work_struct **)(temp->key));
+	//navigate through list
+	while (*work != key) {
+		//if it is last node
+		if (!temp->next) {
+			mutex_unlock(&mutex);
+			return NULL;
+		}
+		//store reference to current link
+		previous = temp;
+		//move to next link
+		temp = temp->next;
+		work = ((struct work_struct **)(temp->key));
+	}
+
+	if (flag) {
+		//found a match, update the link
+		if (temp == work_head) {
+			//change first to point to next link
+			work_head = work_head->next;
+		} else {
+			//bypass the current link
+			previous->next = temp->next;
+		}
+	}
+
+	mutex_unlock(&mutex);
+	data = temp;
+	if (data) {
+		work_new = data->key;
+		if (flag)
+			kfree(data);
+	}
+	return work_new;
+}
+
+void *get_delayed_work_buff(void *key, signed char flag)
+{
+	struct node *data = NULL;
+	void *dwork_new = NULL;
+	struct node *temp = NULL;
+	struct node *previous = NULL;
+	struct delayed_work **dwork = NULL;
+
+	if (flag) {
+		/* This Condition is true when kernel module is removed */
+		return delayed_work_head;
+	}
+	//start from the first link
+	mutex_lock(&mutex);
+	temp = delayed_work_head;
+
+	//if list is empty
+	if (!delayed_work_head) {
+		mutex_unlock(&mutex);
+		return NULL;
+	}
+
+	dwork = ((struct delayed_work **)(temp->key));
+	//navigate through list
+	while (&(*dwork)->work != key) {
+		//if it is last node
+		if (!temp->next) {
+			mutex_unlock(&mutex);
+			return NULL;
+		}
+		//store reference to current link
+		previous = temp;
+		//move to next link
+		temp = temp->next;
+		dwork = ((struct delayed_work **)(temp->key));
+	}
+
+	mutex_unlock(&mutex);
+	data = temp;
+	if (data) {
+		dwork_new = data->key;
+		if (flag)
+			kfree(data);
+	}
+	return dwork_new;
+}
diff --git a/drivers/staging/media/vxd/common/work_queue.h b/drivers/staging/media/vxd/common/work_queue.h
new file mode 100644
index 000000000000..44ed423334e2
--- /dev/null
+++ b/drivers/staging/media/vxd/common/work_queue.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Work Queue Related Definitions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ */
+
+#ifndef WORKQUEUE_H_
+#define WORKQUEUE_H_
+
+#include <linux/types.h>
+
+enum {
+	HWA_DECODER   = 0,
+	HWA_ENCODER    = 1,
+	HWA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * init_work - allocate and initialise a work_struct and save the
+ * given pointer (work_args) in the internal linked list.
+ * @work_args: address of the work_struct pointer to initialise
+ * @work_fn: work handler function pointer
+ * @hwa_id: hardware accelerator id (HWA_DECODER or HWA_ENCODER)
+ */
+void init_work(void **work_args, void *work_fn, uint8_t hwa_id);
+
+/*
+ * init_delayed_work - allocate and initialise a delayed_work and save
+ * the given pointer (work_args) in the internal linked list.
+ * @work_args: address of the delayed_work pointer to initialise
+ * @work_fn: work handler function pointer
+ * @hwa_id: hardware accelerator id (HWA_DECODER or HWA_ENCODER)
+ */
+void init_delayed_work(void **work_args, void *work_fn, uint8_t hwa_id);
+
+/*
+ * get_delayed_work_buff - return the base address of the given pointer
+ * @key: the work_struct pointer to search for
+ * @flag: if TRUE, return the list head instead (used on module removal)
+ *
+ * Return: base address of the matching buffer, or NULL if not found.
+ */
+void *get_delayed_work_buff(void *key, signed char flag);
+
+/**
+ * get_work_buff - return the base address of the given pointer
+ * @key: the work_struct pointer to search for
+ * @flag: if TRUE, delete the matching node from the linked list
+ *
+ * Return: base address of the matching buffer, or NULL if not found.
+ */
+void *get_work_buff(void *key, signed char flag);
+
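+/*
+ * Typical usage (illustrative sketch only; my_ctx and my_handler are
+ * hypothetical):
+ *
+ *	struct my_ctx {
+ *		struct work_struct *work;
+ *	};
+ *
+ *	init_work((void **)&ctx->work, my_handler, HWA_DECODER);
+ *	if (ctx->work)
+ *		schedule_work(ctx->work);
+ *
+ * Inside my_handler(), get_work_buff(work_arg, 0) returns the address
+ * of ctx->work, from which the enclosing context can be derived.
+ */
+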
+#endif /* WORKQUEUE_H_ */
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 11/30] v4l: vxd-dec: Add TALMMU module
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (9 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 10/30] v4l: vxd-dec: Add utility modules sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 12/30] v4l: vxd-dec: Add VDEC MMU wrapper sidraya.bj
                   ` (20 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

It contains the implementation of the address allocation management
APIs, list processing primitives, a generic resource allocator,
self-scaling hash tables and an object pool memory allocator, which
are needed for TALMMU functionality.
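
A minimal usage sketch of the address allocator introduced here
(illustrative only; the calls are those declared in addr_alloc.h):

	static struct addr_region region = {
		.name = (unsigned char *)"fw",
		.base_addr = 0x0,
		.size = 0x100000,
	};
	unsigned long long addr;

	addr_initialise();
	addr_define_mem_region(&region);
	addr_malloc((unsigned char *)"fw", 4096, &addr);
	...
	addr_free((unsigned char *)"fw", addr);
	addr_deinitialise();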

Signed-off-by: Lakshmi Sankar <lakshmisankar-t@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |  10 +
 drivers/staging/media/vxd/common/addr_alloc.c | 499 +++++++++
 drivers/staging/media/vxd/common/addr_alloc.h | 238 +++++
 drivers/staging/media/vxd/common/hash.c       | 481 +++++++++
 drivers/staging/media/vxd/common/hash.h       |  86 ++
 drivers/staging/media/vxd/common/pool.c       | 228 ++++
 drivers/staging/media/vxd/common/pool.h       |  66 ++
 drivers/staging/media/vxd/common/ra.c         | 972 ++++++++++++++++++
 drivers/staging/media/vxd/common/ra.h         | 200 ++++
 drivers/staging/media/vxd/common/talmmu_api.c | 753 ++++++++++++++
 drivers/staging/media/vxd/common/talmmu_api.h | 246 +++++
 11 files changed, 3779 insertions(+)
 create mode 100644 drivers/staging/media/vxd/common/addr_alloc.c
 create mode 100644 drivers/staging/media/vxd/common/addr_alloc.h
 create mode 100644 drivers/staging/media/vxd/common/hash.c
 create mode 100644 drivers/staging/media/vxd/common/hash.h
 create mode 100644 drivers/staging/media/vxd/common/pool.c
 create mode 100644 drivers/staging/media/vxd/common/pool.h
 create mode 100644 drivers/staging/media/vxd/common/ra.c
 create mode 100644 drivers/staging/media/vxd/common/ra.h
 create mode 100644 drivers/staging/media/vxd/common/talmmu_api.c
 create mode 100644 drivers/staging/media/vxd/common/talmmu_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 2668eeb89a34..2b0d0708d852 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19537,8 +19537,12 @@ M:	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/img,d5520-vxd.yaml
+F:	drivers/staging/media/vxd/common/addr_alloc.c
+F:	drivers/staging/media/vxd/common/addr_alloc.h
 F:	drivers/staging/media/vxd/common/dq.c
 F:	drivers/staging/media/vxd/common/dq.h
+F:	drivers/staging/media/vxd/common/hash.c
+F:	drivers/staging/media/vxd/common/hash.h
 F:	drivers/staging/media/vxd/common/idgen_api.c
 F:	drivers/staging/media/vxd/common/idgen_api.h
 F:	drivers/staging/media/vxd/common/img_mem_man.c
@@ -19548,6 +19552,12 @@ F:	drivers/staging/media/vxd/common/imgmmu.c
 F:	drivers/staging/media/vxd/common/imgmmu.h
 F:	drivers/staging/media/vxd/common/lst.c
 F:	drivers/staging/media/vxd/common/lst.h
+F:	drivers/staging/media/vxd/common/pool.c
+F:	drivers/staging/media/vxd/common/pool.h
+F:	drivers/staging/media/vxd/common/ra.c
+F:	drivers/staging/media/vxd/common/ra.h
+F:	drivers/staging/media/vxd/common/talmmu_api.c
+F:	drivers/staging/media/vxd/common/talmmu_api.h
 F:	drivers/staging/media/vxd/common/work_queue.c
 F:	drivers/staging/media/vxd/common/work_queue.h
 F:	drivers/staging/media/vxd/decoder/hw_control.c
diff --git a/drivers/staging/media/vxd/common/addr_alloc.c b/drivers/staging/media/vxd/common/addr_alloc.c
new file mode 100644
index 000000000000..393d309b2c0c
--- /dev/null
+++ b/drivers/staging/media/vxd/common/addr_alloc.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Address allocation APIs - used to manage address allocation
+ * with a number of predefined regions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "addr_alloc.h"
+#include "hash.h"
+#include "img_errors.h"
+
+/* Global context. */
+static struct addr_context global_ctx = {0};
+/* Sub-system initialized. */
+static int global_initialized;
+/* Count of contexts. */
+static unsigned int num_ctx;
+/* Global mutex */
+static struct mutex *global_lock;
+
+/**
+ * addr_initialise - initialise the global address allocation context
+ *
+ * Return: IMG_SUCCESS or an error code.
+ */
+int addr_initialise(void)
+{
+	unsigned int result = IMG_ERROR_ALREADY_INITIALISED;
+
+	/* If we are not initialized */
+	if (!global_initialized)
+		result = addr_cx_initialise(&global_ctx);
+	return result;
+}
+
+int addr_cx_initialise(struct addr_context * const context)
+{
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!context)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!global_initialized) {
+		/* Initialise context */
+		memset(context, 0x00, sizeof(struct addr_context));
+
+		/* If no mutex associated with this resource */
+		if (!global_lock) {
+			/* Create one */
+
+			global_lock = kzalloc(sizeof(*global_lock), GFP_KERNEL);
+			if (!global_lock)
+				return -ENOMEM;
+
+			mutex_init(global_lock);
+		}
+
+		mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+		/* Initialise the hash functions. */
+		result = vid_hash_initialise();
+		if (result != IMG_SUCCESS) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		/* Initialise the arena functions */
+		result = vid_ra_initialise();
+		if (result != IMG_SUCCESS) {
+			mutex_unlock(global_lock);
+			result = vid_hash_finalise();
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		/* We are now initialized */
+		global_initialized = TRUE;
+		result = IMG_SUCCESS;
+	} else {
+		mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+	}
+
+	num_ctx++;
+	mutex_unlock(global_lock);
+
+	return result;
+}
+
+int addr_deinitialise(void)
+{
+	return addr_cx_deinitialise(&global_ctx);
+}
+
+int addr_cx_deinitialise(struct addr_context * const context)
+{
+	struct addr_region *tmp_region = NULL;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!context)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (global_initialized) {
+		mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+		tmp_region = context->regions;
+
+		/* Delete all arena structure */
+		if (context->default_region)
+			result = vid_ra_delete(context->default_region->arena);
+
+		while (tmp_region) {
+			result = vid_ra_delete(tmp_region->arena);
+			tmp_region = tmp_region->nxt_region;
+		}
+
+		if (num_ctx != 0)
+			num_ctx--;
+
+		result = IMG_SUCCESS;
+		if (num_ctx == 0) {
+			/* Free off resources */
+			result = vid_hash_finalise();
+			result = vid_ra_deinit();
+			global_initialized = FALSE;
+
+			mutex_unlock(global_lock);
+			mutex_destroy(global_lock);
+			kfree(global_lock);
+			global_lock = NULL;
+		} else {
+			mutex_unlock(global_lock);
+		}
+	}
+
+	return result;
+}
+
+int addr_define_mem_region(struct addr_region * const region)
+{
+	return addr_cx_define_mem_region(&global_ctx, region);
+}
+
+int addr_cx_define_mem_region(struct addr_context * const context,
+			      struct addr_region * const region)
+{
+	struct addr_region *tmp_region = NULL;
+	unsigned int result = IMG_SUCCESS;
+
+	if (!context || !region)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+	tmp_region = context->regions;
+
+	/* Ensure the link to the next is NULL */
+	region->nxt_region = NULL;
+
+	/* If this is the default memory region */
+	if (!region->name) {
+		/* Should not previously have been defined */
+		if (context->default_region) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		context->default_region = region;
+		context->no_regions++;
+
+		/*
+		 * Create an arena for memory allocation
+		 * name of resource arena for debug
+		 * start of resource
+		 * size of resource
+		 * allocation quantum
+		 * import allocator
+		 * import deallocator
+		 * import handle
+		 */
+		result = vid_ra_create("memory",
+				       region->base_addr,
+				       region->size,
+				       1,
+				       NULL,
+				       NULL,
+				       NULL,
+				       &region->arena);
+
+		if (result != IMG_SUCCESS) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+	} else {
+		/*
+		 * Run down the list of existing named regions
+		 * to check if there is a region with this name
+		 */
+		while (tmp_region &&
+		       (strcmp(region->name, tmp_region->name) != 0) &&
+		       tmp_region->nxt_region) {
+			tmp_region = tmp_region->nxt_region;
+		}
+
+		/* If we have items in the list */
+		if (tmp_region) {
+			/*
+			 * Check we didn't stop because the name
+			 * clashes with one already defined.
+			 */
+
+			if (strcmp(region->name, tmp_region->name) == 0 ||
+			    tmp_region->nxt_region) {
+				mutex_unlock(global_lock);
+				return IMG_ERROR_UNEXPECTED_STATE;
+			}
+
+			/* Add to end of list */
+			tmp_region->nxt_region = region;
+		} else {
+			/* Add to head of list */
+			context->regions = region;
+		}
+
+		context->no_regions++;
+
+		/*
+		 * Create an arena for memory allocation
+		 * name of resource arena for debug
+		 * start of resource
+		 * size of resource
+		 * allocation quantum
+		 * import allocator
+		 * import deallocator
+		 * import handle
+		 */
+		result = vid_ra_create(region->name,
+				       region->base_addr,
+				       region->size,
+				       1,
+				       NULL,
+				       NULL,
+				       NULL,
+				       &region->arena);
+
+		if (result != IMG_SUCCESS) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+	}
+
+	mutex_unlock(global_lock);
+
+	/* Check the arena was created OK */
+	if (!region->arena)
+		return IMG_ERROR_UNEXPECTED_STATE;
+
+	return result;
+}
+
+int addr_malloc(const unsigned char * const name,
+		unsigned long long size,
+		unsigned long long * const base_adr)
+{
+	return addr_cx_malloc(&global_ctx, name, size, base_adr);
+}
+
+int addr_cx_malloc(struct addr_context * const context,
+		   const unsigned char * const name,
+		   unsigned long long size,
+		   unsigned long long * const base_adr)
+{
+	unsigned int result = IMG_ERROR_FATAL;
+	struct addr_region *tmp_region = NULL;
+
+	if (!context || !base_adr || !name)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	*(base_adr) = (unsigned long long)-1LL;
+
+	mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+	tmp_region = context->regions;
+
+	/*
+	 * Run down the list of existing named
+	 * regions to locate this
+	 */
+	while (tmp_region && (strcmp(name, tmp_region->name) != 0) && (tmp_region->nxt_region))
+		tmp_region = tmp_region->nxt_region;
+
+	/* If there was no match. */
+	if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+		/* Use the default */
+		if (!context->default_region) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		tmp_region = context->default_region;
+	}
+
+	if (!tmp_region) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_UNEXPECTED_STATE;
+	}
+
+	/* Allocate size + guard band */
+	result = vid_ra_alloc(tmp_region->arena,
+			      size + tmp_region->guard_band,
+			      NULL,
+			      NULL,
+			      SEQUENTIAL_ALLOCATION,
+			      1,
+			      base_adr);
+	if (result != IMG_SUCCESS) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_OUT_OF_MEMORY;
+	}
+
+	mutex_unlock(global_lock);
+
+	return result;
+}
+
+int addr_cx_malloc_res(struct addr_context * const context,
+		       const unsigned char * const name,
+		       unsigned long long size,
+		       unsigned long long * const base_adr)
+{
+	unsigned int result = IMG_ERROR_FATAL;
+	struct addr_region *tmp_region = NULL;
+
+	if (!context || !base_adr || !name)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+	tmp_region = context->regions;
+	/* If the allocation is for the default region */
+	/*
+	 * Run down the list of existing named
+	 * regions to locate this
+	 */
+	while (tmp_region && (strcmp(name, tmp_region->name) != 0) && (tmp_region->nxt_region))
+		tmp_region = tmp_region->nxt_region;
+
+	/* If there was no match. */
+	if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+		/* Use the default */
+		if (!context->default_region) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+		tmp_region = context->default_region;
+	}
+	if (!tmp_region) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_UNEXPECTED_STATE;
+	}
+	/* Allocate size + guard band */
+	result = vid_ra_alloc(tmp_region->arena, size + tmp_region->guard_band,
+			      NULL, NULL, SEQUENTIAL_ALLOCATION, 1, base_adr);
+	if (result != IMG_SUCCESS) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_OUT_OF_MEMORY;
+	}
+	mutex_unlock(global_lock);
+
+	return result;
+}
+
+int addr_cx_malloc_align_res(struct addr_context * const context,
+			     const unsigned char * const name,
+			     unsigned long long size,
+			     unsigned long long alignment,
+			     unsigned long long * const base_adr)
+{
+	unsigned int result;
+	struct addr_region *tmp_region = NULL;
+
+	if (!context || !base_adr || !name)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+	tmp_region = context->regions;
+
+	/*
+	 * Run down the list of existing named
+	 * regions to locate this
+	 */
+	while (tmp_region &&
+	       (strcmp(name, tmp_region->name) != 0) &&
+	       (tmp_region->nxt_region)) {
+		tmp_region = tmp_region->nxt_region;
+	}
+	/* If there was no match. */
+	if (!tmp_region ||
+	    (strcmp(name, tmp_region->name) != 0)) {
+		/* Use the default */
+		if (!context->default_region) {
+			mutex_unlock(global_lock);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		tmp_region = context->default_region;
+	}
+
+	if (!tmp_region) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_UNEXPECTED_STATE;
+	}
+	/* Allocate size + guard band */
+	result = vid_ra_alloc(tmp_region->arena,
+			      size + tmp_region->guard_band,
+			      NULL,
+			      NULL,
+			      SEQUENTIAL_ALLOCATION,
+			      alignment,
+			      base_adr);
+	if (result != IMG_SUCCESS) {
+		mutex_unlock(global_lock);
+		return IMG_ERROR_OUT_OF_MEMORY;
+	}
+
+	mutex_unlock(global_lock);
+
+	return result;
+}
+
+int addr_free(const unsigned char * const name, unsigned long long addr)
+{
+	return addr_cx_free(&global_ctx, name, addr);
+}
+
+int addr_cx_free(struct addr_context * const context,
+		 const unsigned char * const name,
+		 unsigned long long addr)
+{
+	struct addr_region *tmp_region;
+	unsigned int result;
+
+	if (!context)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	tmp_region = context->regions;
+
+	mutex_lock_nested(global_lock, SUBCLASS_ADDR_ALLOC);
+
+	/* If the allocation is for the default region */
+	if (!name) {
+		if (!context->default_region) {
+			result = IMG_ERROR_INVALID_PARAMETERS;
+			goto error;
+		}
+		tmp_region = context->default_region;
+	} else {
+		/*
+		 * Run down the list of existing named
+		 * regions to locate this
+		 */
+		while (tmp_region &&
+		       (strcmp(name, tmp_region->name) != 0) &&
+		       tmp_region->nxt_region) {
+			tmp_region = tmp_region->nxt_region;
+		}
+
+		/* If there was no match */
+		if (!tmp_region || (strcmp(name, tmp_region->name) != 0)) {
+			/* Use the default */
+			if (!context->default_region) {
+				result = IMG_ERROR_INVALID_PARAMETERS;
+				goto error;
+			}
+			tmp_region = context->default_region;
+		}
+	}
+
+	/* Free the address */
+	result = vid_ra_free(tmp_region->arena, addr);
+
+error:
+	mutex_unlock(global_lock);
+	return result;
+}
diff --git a/drivers/staging/media/vxd/common/addr_alloc.h b/drivers/staging/media/vxd/common/addr_alloc.h
new file mode 100644
index 000000000000..387418b124e4
--- /dev/null
+++ b/drivers/staging/media/vxd/common/addr_alloc.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address allocation management API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __ADDR_ALLOC_H__
+#define __ADDR_ALLOC_H__
+
+#include <linux/types.h>
+#include "ra.h"
+
+/* Defines whether sequential or random allocation is used */
+enum {
+	SEQUENTIAL_ALLOCATION,
+	RANDOM_ALLOCATION,
+	RANDOM_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/**
+ * struct addr_region - Memory region structure
+ * @name: A pointer to a string containing the name of the region.
+ *	NULL for the default memory region.
+ * @base_addr: The base address of the memory region.
+ * @size: The size of the memory region.
+ * @guard_band: The size of any guard band to be used.
+ *	Guard bands can be useful in separating block allocations
+ *	and allow the caller to detect erroneous accesses
+ *	into these areas.
+ * @nxt_region: Used internally by the ADDR API. A pointer used to point
+ *	to the next memory region.
+ * @arena: Used internally by the ADDR API. A pointer to a structure
+ *	used to maintain and perform address allocation.
+ *
+ * This structure contains information about the memory region.
+ */
+struct addr_region {
+	unsigned char *name;
+	unsigned long long base_addr;
+	unsigned long long size;
+	unsigned int guard_band;
+	struct addr_region *nxt_region;
+	void *arena;
+};
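+
+/*
+ * Example of a statically defined region (illustrative only; the
+ * structure must reside in static storage since the ADDR sub-system
+ * retains the pointer):
+ *
+ *	static struct addr_region fw_region = {
+ *		.name = (unsigned char *)"fw",
+ *		.base_addr = 0x0,
+ *		.size = 0x100000,
+ *		.guard_band = 64,
+ *	};
+ */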
+
+/**
+ * struct addr_context - Context for address allocation
+ * @regions: Pointer to the first region in the list.
+ * @default_region: Pointer to the default region.
+ * @no_regions: Number of regions currently available (including default).
+ */
+struct addr_context {
+	struct addr_region *regions;
+	struct addr_region *default_region;
+	unsigned int no_regions;
+};
+
+/*
+ * @Function	ADDR_Initialise
+ * @Description
+ * This function is used to initialise the address allocation sub-system.
+ * NOTE: This function may be called multiple times. The initialisation only
+ * happens the first time it is called.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_initialise(void);
+
+/*
+ * @Function	addr_deinitialise
+ * @Description
+ * This function is used to de-initialise the address allocation sub-system.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_deinitialise(void);
+
+/*
+ * @Function	addr_define_mem_region
+ * @Description
+ * This function is used to define a memory region.
+ * NOTE: The region structure MUST be defined in static memory as this
+ * is retained and used by the ADDR sub-system.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input	region: A pointer to a region structure.
+ * @Return	IMG_RESULT  : IMG_SUCCESS or an error code.
+ */
+int addr_define_mem_region(struct addr_region * const region);
+
+/*
+ * @Function	addr_malloc
+ * @Description
+ * This function is used to allocate space within a memory region.
+ * NOTE: Allocation failures or invalid parameters are trapped by asserts.
+ * @Input	name: Is a pointer the name of the memory region.
+ *		NULL can be used to allocate space from the
+ *		default memory region.
+ * @Input	size: The size (in bytes) of the allocation.
+ * @Output	base_adr : The address of the allocated space.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_malloc(const unsigned char *const name,
+		unsigned long long size,
+		unsigned long long *const base_adr);
+
+/*
+ * @Function	addr_free
+ * @Description
+ * This function is used to free previously allocated space within
+ * a memory region.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input	name: Is a pointer to the name of the memory region.
+ *		NULL is used to free space from the default memory region.
+ *@Input	addr: The address allocated.
+ *@Return	IMG_SUCCESS or an error code.
+ */
+int addr_free(const unsigned char * const name, unsigned long long addr);
+
+/*
+ * @Function	addr_cx_initialise
+ * @Description
+ * This function is used to initialise the address allocation sub-system with
+ * an external context structure.
+ * NOTE: This function should be called only once per context.
+ * @Input	context : Pointer to context structure.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_initialise(struct addr_context * const context);
+
+/*
+ * @Function	addr_cx_deinitialise
+ * @Description
+ * This function is used to de-initialise the address allocation
+ * sub-system with an external context structure.
+ * @Input	context : Pointer to context structure.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_deinitialise(struct addr_context * const context);
+
+/*
+ * @Function	addr_cx_define_mem_region
+ * @Description
+ * This function is used to define a memory region with an external
+ * context structure.
+ * NOTE: The region structure MUST be defined in static memory as this
+ * is retained and used by the ADDR sub-system.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input	context : Pointer to context structure.
+ * @Input	region : A pointer to a region structure.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_define_mem_region(struct addr_context *const context,
+			      struct addr_region *const region);
+
+/*
+ * @Function	addr_cx_malloc
+ * @Description
+ * This function is used to allocate space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures or invalid parameters are trapped by asserts.
+ * @Input	context : Pointer to context structure.
+ * @Input	name : Is a pointer the name of the memory region.
+ *		NULL can be used to allocate space from the
+ *		default memory region.
+ * @Input	size : The size (in bytes) of the allocation.
+ * @Output	base_adr : The address of the allocated space.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc(struct addr_context * const context,
+		   const unsigned char *const name,
+		   unsigned long long size,
+		   unsigned long long *const base_adr);
+
+/*
+ * @Function	addr_cx_malloc_res
+ * @Description
+ * This function is used to allocate space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures are returned in IMG_RESULT, however invalid
+ * parameters are trapped by asserts.
+ * @Input	context : Pointer to context structure.
+ * @Input	name : Is a pointer the name of the memory region.
+ *		NULL can be used to allocate space from the
+ *		default memory region.
+ * @Input	size : The size (in bytes) of the allocation.
+ * @Input	base_adr : Pointer to the address of the allocated space.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc_res(struct addr_context *const context,
+		       const unsigned char *const name,
+		       unsigned long long size,
+		       unsigned long long * const base_adr);
+
+/*
+ * @Function	addr_cx_malloc_align_res
+ * @Description
+ * This function is used to allocate aligned space within a memory region with
+ * an external context structure.
+ * NOTE: Allocation failures are returned in IMG_RESULT, however invalid
+ * parameters are trapped by asserts.
+ * @Input	context : Pointer to context structure.
+ * @Input	name : Is a pointer the name of the memory region.
+ *		NULL can be used to allocate space from the
+ *		default memory region.
+ * @Input	size	: The size (in bytes) of the allocation.
+ * @Input	alignment : The required byte alignment (1, 2, 4, 8, 16 etc).
+ * @Input	base_adr : Pointer to the address of the allocated space.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_malloc_align_res(struct addr_context *const context,
+			     const unsigned char *const name,
+			     unsigned long long size,
+			     unsigned long long alignment,
+			     unsigned long long *const base_adr);
+
+/*
+ * @Function	addr_cx_free
+ * @Description
+ * This function is used to free previously allocated space within a memory region
+ * with an external context structure.
+ * NOTE: Invalid parameters are trapped by asserts.
+ * @Input	context : Pointer to context structure.
+ * @Input	name : Is a pointer the name of the memory region.
+ *		NULL is used to free space from the
+ *		default memory region.
+ * @Input	addr : The address allocated.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int addr_cx_free(struct addr_context *const context,
+		 const unsigned char *const name,
+		 unsigned long long addr);
+
+#endif /* __ADDR_ALLOC_H__	*/
diff --git a/drivers/staging/media/vxd/common/hash.c b/drivers/staging/media/vxd/common/hash.c
new file mode 100644
index 000000000000..1a03aecc34ef
--- /dev/null
+++ b/drivers/staging/media/vxd/common/hash.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Self scaling hash tables.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hash.h"
+#include "img_errors.h"
+#include "pool.h"
+
+/* pool of struct hash objects */
+static struct pool *global_hashpool;
+
+/* pool of struct bucket objects */
+static struct pool *global_bucketpool;
+
+static int global_initialized;
+
+/* Each entry in a hash table is placed into a bucket */
+struct bucket {
+	struct bucket *next;
+	unsigned long long key;
+	unsigned long long value;
+};
+
+struct hash {
+	struct bucket **table;
+	unsigned int size;
+	unsigned int count;
+	unsigned int minimum_size;
+};
+
+/**
+ * hash_func - Hash function intended for hashing addresses.
+ * @value : The key to hash.
+ * @size : The size of the hash table
+ */
+static unsigned int hash_func(unsigned long long value,
+			      unsigned int size)
+{
+	unsigned int hash = (unsigned int)(value);
+
+	hash += (hash << 12);
+	hash ^= (hash >> 22);
+	hash += (hash << 4);
+	hash ^= (hash >> 9);
+	hash += (hash << 10);
+	hash ^= (hash >> 2);
+	hash += (hash << 7);
+	hash ^= (hash >> 12);
+	hash &= (size - 1);
+	return hash;
+}
+
+/*
+ * @Function	hash_chain_insert
+ * @Description
+ * Hash function intended for hashing addresses.
+ * @Input	bucket : The bucket
+ * @Input	table : The hash table
+ * @Input	size : The size of the hash table
+ * @Return	IMG_SUCCESS or an error code.
+ */
+static int hash_chain_insert(struct bucket *bucket,
+			     struct bucket **table,
+			     unsigned int size)
+{
+	unsigned int idx;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!bucket || !table || !size) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	idx = hash_func(bucket->key, size);
+
+	if (idx < size) {
+		result = IMG_SUCCESS;
+		bucket->next = table[idx];
+		table[idx] = bucket;
+	}
+
+	return result;
+}
+
+/*
+ * @Function	hash_rehash
+ * @Description
+ * Iterate over every entry in an old hash table and rehash into the new table.
+ * @Input	old_table : The old hash table
+ * @Input	old_size : The size of the old hash table
+ * @Input	new_table : The new hash table
+ * @Input	new_sz : The size of the new hash table
+ * @Return	IMG_SUCCESS or an error code.
+ */
+static int hash_rehash(struct bucket **old_table,
+		       unsigned int old_size,
+		       struct bucket **new_table,
+		       unsigned int new_sz)
+{
+	unsigned int idx;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!old_table || !new_table) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	for (idx = 0; idx < old_size; idx++) {
+		struct bucket *bucket;
+		struct bucket *nex_bucket;
+
+		bucket = old_table[idx];
+		while (bucket) {
+			nex_bucket = bucket->next;
+			result = hash_chain_insert(bucket, new_table, new_sz);
+			if (result != IMG_SUCCESS) {
+				result = IMG_ERROR_UNEXPECTED_STATE;
+				return result;
+			}
+			bucket = nex_bucket;
+		}
+	}
+	result = IMG_SUCCESS;
+
+	return result;
+}
+
+/*
+ * @Function	hash_resize
+ * @Description
+ * Attempt to resize a hash table, failure to allocate a new larger hash table
+ * is not considered a hard failure. We simply continue and allow the table to
+ * fill up, the effect is to allow hash chains to become longer.
+ * @Input	hash_arg : Pointer to the hash table
+ * @Input	new_sz : The size of the new hash table
+ * @Return	IMG_SUCCESS or an error code.
+ */
+static int hash_resize(struct hash *hash_arg,
+		       unsigned int new_sz)
+{
+	unsigned int malloc_sz = 0;
+	unsigned int result = IMG_ERROR_FATAL;
+	unsigned int idx;
+
+	if (!hash_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	if (new_sz != hash_arg->size) {
+		struct bucket **new_bkt_table;
+
+		malloc_sz = (sizeof(struct bucket *) * new_sz);
+		new_bkt_table = kmalloc(malloc_sz, GFP_KERNEL);
+
+		if (!new_bkt_table) {
+			result = IMG_ERROR_MALLOC_FAILED;
+			return result;
+		}
+
+		for (idx = 0; idx < new_sz; idx++)
+			new_bkt_table[idx] = NULL;
+
+		result = hash_rehash(hash_arg->table,
+				     hash_arg->size,
+				     new_bkt_table,
+				     new_sz);
+
+		if (result != IMG_SUCCESS) {
+			kfree(new_bkt_table);
+			new_bkt_table = NULL;
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+
+		kfree(hash_arg->table);
+		hash_arg->table = new_bkt_table;
+		hash_arg->size = new_sz;
+	}
+	result = IMG_SUCCESS;
+
+	return result;
+}
+
+static unsigned int private_max(unsigned int a, unsigned int b)
+{
+	unsigned int ret = (a > b) ? a : b;
+
+	return ret;
+}
+
+/*
+ * @Function	vid_hash_initialise
+ * @Description
+ * To initialise the hash module.
+ * @Input	None
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_initialise(void)
+{
+	unsigned int result = IMG_ERROR_ALREADY_COMPLETE;
+
+	if (!global_initialized) {
+		if (global_hashpool || global_bucketpool) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+
+		result = pool_create("img-hash",
+				     sizeof(struct hash),
+				     &global_hashpool);
+
+		if (result != IMG_SUCCESS) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+
+		result = pool_create("img-sBucket",
+				     sizeof(struct bucket),
+				     &global_bucketpool);
+		if (result != IMG_SUCCESS) {
+			if (global_bucketpool) {
+				result = pool_delete(global_bucketpool);
+				global_bucketpool = NULL;
+			}
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+		global_initialized = true;
+		result = IMG_SUCCESS;
+	}
+	return result;
+}
+
+/*
+ * @Function	vid_hash_finalise
+ * @Description
+ * To finalise the hash module. All allocated hash tables should
+ * be deleted before calling this function.
+ * @Input	None
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_finalise(void)
+{
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (global_initialized) {
+		if (global_hashpool) {
+			result = pool_delete(global_hashpool);
+			if (result != IMG_SUCCESS)
+				return result;
+
+			global_hashpool = NULL;
+		}
+
+		if (global_bucketpool) {
+			result = pool_delete(global_bucketpool);
+			if (result != IMG_SUCCESS)
+				return result;
+
+			global_bucketpool = NULL;
+		}
+		global_initialized = false;
+		result = IMG_SUCCESS;
+	}
+
+	return result;
+}
+
+/*
+ * @Function	vid_hash_create
+ * @Description
+ * Create a self scaling hash table.
+ * @Input	initial_size : Initial and minimum size of the hash table.
+ * @Output	hash_arg : Will contain the hash table handle or NULL.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_create(unsigned int initial_size,
+		    struct hash ** const hash_arg)
+{
+	unsigned int idx;
+	unsigned int tbl_sz = 0;
+	unsigned int result = IMG_ERROR_FATAL;
+	struct hash *local_hash = NULL;
+
+	if (!hash_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	if (global_initialized) {
+		pool_alloc(global_hashpool, ((void **)&local_hash));
+		if (!local_hash) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			*hash_arg = NULL;
+			return result;
+		}
+
+		local_hash->count = 0;
+		local_hash->size = initial_size;
+		local_hash->minimum_size = initial_size;
+
+		tbl_sz = (sizeof(struct bucket *) * local_hash->size);
+		local_hash->table = kmalloc(tbl_sz, GFP_KERNEL);
+		if (!local_hash->table) {
+			result = pool_free(global_hashpool, local_hash);
+			if (result != IMG_SUCCESS)
+				result = IMG_ERROR_UNEXPECTED_STATE;
+			result |= IMG_ERROR_MALLOC_FAILED;
+			*hash_arg = NULL;
+			return result;
+		}
+
+		for (idx = 0; idx < local_hash->size; idx++)
+			local_hash->table[idx] = NULL;
+
+		*hash_arg = local_hash;
+		result = IMG_SUCCESS;
+	}
+	return result;
+}
+
+/*
+ * @Function	vid_hash_delete
+ * @Description
+ * To delete a hash table, all entries in the table should be
+ * removed before calling this function.
+ * @Input	hash_arg : Hash table pointer
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_delete(struct hash * const hash_arg)
+{
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!hash_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	if (global_initialized) {
+		if (hash_arg->count != 0) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+
+		kfree(hash_arg->table);
+		hash_arg->table = NULL;
+
+		result = pool_free(global_hashpool, hash_arg);
+		if (result != IMG_SUCCESS) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+	}
+	return result;
+}
+
+/*
+ * @Function	vid_hash_insert
+ * @Description
+ * To insert a key value pair into a hash table.
+ * @Input	hash_arg : Hash table pointer
+ * @Input	key : Key value
+ * @Input	value : The value associated with the key.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_insert(struct hash * const hash_arg,
+		    unsigned long long key,
+		    unsigned long long value)
+{
+	struct bucket *ps_bucket = NULL;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!hash_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	if (global_initialized) {
+		result = pool_alloc(global_bucketpool, ((void **)&ps_bucket));
+		if (result != IMG_SUCCESS || !ps_bucket) {
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+		ps_bucket->next = NULL;
+		ps_bucket->key = key;
+		ps_bucket->value = value;
+
+		result = hash_chain_insert(ps_bucket,
+					   hash_arg->table,
+					   hash_arg->size);
+
+		if (result != IMG_SUCCESS) {
+			pool_free(global_bucketpool, ((void **)&ps_bucket));
+			result = IMG_ERROR_UNEXPECTED_STATE;
+			return result;
+		}
+
+		hash_arg->count++;
+
+		/* re-balance: grow once count exceeds half the table size */
+		if ((hash_arg->count << 1) > hash_arg->size) {
+			result = hash_resize(hash_arg, (hash_arg->size << 1));
+			if (result != IMG_SUCCESS) {
+				result = IMG_ERROR_UNEXPECTED_STATE;
+				return result;
+			}
+		}
+		result = IMG_SUCCESS;
+	}
+	return result;
+}
+
+/*
+ * @Function	vid_hash_remove
+ * @Description
+ * To remove a key value pair from a hash table
+ * @Input	hash_arg : Hash table pointer
+ * @Input	key : Key value
+ * @Output	ret_result : 0 if the key is missing, or the value
+ *		associated with the key.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_remove(struct hash * const hash_arg,
+		    unsigned long long key,
+		    unsigned long * const ret_result)
+{
+	unsigned int idx;
+	unsigned int tmp1 = 0;
+	unsigned int tmp2 = 0;
+	unsigned int result = IMG_ERROR_FATAL;
+	struct bucket **bucket = NULL;
+
+	if (!hash_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	idx = hash_func(key, hash_arg->size);
+
+	for (bucket = &hash_arg->table[idx]; (*bucket) != NULL;
+		bucket = &((*bucket)->next)) {
+		if ((*bucket)->key == key) {
+			struct bucket *ps_bucket = (*bucket);
+
+			unsigned long long value = ps_bucket->value;
+
+			*bucket = ps_bucket->next;
+			result = pool_free(global_bucketpool, ps_bucket);
+
+			hash_arg->count--;
+
+			/* re-balance: shrink when the table is under a quarter full */
+			if (hash_arg->size > (hash_arg->count << 2) &&
+			    hash_arg->size > hash_arg->minimum_size) {
+				tmp1 = (hash_arg->size >> 1);
+				tmp2 = hash_arg->minimum_size;
+				result = hash_resize(hash_arg,
+						     private_max(tmp1, tmp2));
+			}
+			*ret_result = value;
+			result = IMG_SUCCESS;
+			break;
+		}
+	}
+	return result;
+}
diff --git a/drivers/staging/media/vxd/common/hash.h b/drivers/staging/media/vxd/common/hash.h
new file mode 100644
index 000000000000..91034d1ba441
--- /dev/null
+++ b/drivers/staging/media/vxd/common/hash.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Self scaling hash tables.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *      Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _HASH_H_
+#define _HASH_H_
+
+#include <linux/types.h>
+struct hash;
+
+/*
+ * @Function	VID_HASH_Initialise
+ * @Description
+ * To initialise the hash module.
+ * @Input	None
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_initialise(void);
+
+/*
+ * @Function	VID_HASH_Finalise
+ * @Description
+ * To finalise the hash module. All allocated hash tables should
+ * be deleted before calling this function.
+ * @Input	None
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_finalise(void);
+
+/*
+ * @Function	VID_HASH_Create
+ * @Description
+ * Create a self scaling hash table.
+ * @Input	initial_size : Initial and minimum size of the hash table.
+ * @Output	hash_hndl : Hash table handle or NULL.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_create(unsigned int initial_size,
+		    struct hash ** const hash_hndl);
+
+/*
+ * @Function	VID_HASH_Delete
+ * @Description
+ * To delete a hash table, all entries in the table should be
+ * removed before calling this function.
+ * @Input	hash : Hash table pointer
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_delete(struct hash * const ps_hash);
+
+/*
+ * @Function	VID_HASH_Insert
+ * @Description
+ * To insert a key value pair into a hash table.
+ * @Input	ps_hash : Hash table pointer
+ * @Input	key : Key value
+ * @Input	value : The value associated with the key.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_insert(struct hash * const ps_hash,
+		    unsigned long long key,
+		    unsigned long long value);
+
+/*
+ * @Function	VID_HASH_Remove
+ * @Description
+ * To remove a key value pair from a hash table
+ * @Input	ps_hash : Hash table pointer
+ * @Input	key : Key value
+ * @Input	result : 0 if the key is missing or the value
+ *		associated with the key.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int vid_hash_remove(struct hash * const ps_hash,
+		    unsigned long long key,
+		    unsigned long * const result);
+
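+/*
+ * Typical call sequence (illustrative sketch only):
+ *
+ *	struct hash *h;
+ *	unsigned long val;
+ *
+ *	vid_hash_initialise();
+ *	vid_hash_create(64, &h);
+ *	vid_hash_insert(h, key, value);
+ *	vid_hash_remove(h, key, &val);
+ *	vid_hash_delete(h);
+ *	vid_hash_finalise();
+ */
+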
+#endif /* _HASH_H_ */
diff --git a/drivers/staging/media/vxd/common/pool.c b/drivers/staging/media/vxd/common/pool.c
new file mode 100644
index 000000000000..c0cb1e465c50
--- /dev/null
+++ b/drivers/staging/media/vxd/common/pool.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Object Pool Memory Allocator
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "pool.h"
+
+#define BUFF_MAX_SIZE 4096
+#define BUFF_MAX_GROW 32
+
+/* 64 bits */
+#define ALIGN_SIZE (sizeof(long long) - 1)
+
+struct pool {
+	unsigned char *name;
+	unsigned int size;
+	unsigned int grow;
+	struct buffer *buffers;
+	struct object *objects;
+};
+
+struct buffer {
+	struct buffer *next;
+};
+
+struct object {
+	struct object *next;
+};
+
+static inline unsigned char *strdup_cust(const unsigned char *str)
+{
+	unsigned int len = strlen((const char *)str) + 1;
+	unsigned char *r = kmalloc(len, GFP_KERNEL);
+
+	if (r)
+		memcpy(r, str, len);
+	return r;
+}
+
+/*
+ * pool_create - Create an sObject pool
+ * @name: Name of sObject pool for diagnostic purposes
+ * @obj_size: size of each sObject in the pool in bytes
+ * @pool_hdnl: Will contain NULL or sObject pool handle
+ *
+ * This function Create an sObject pool
+ */
+
+int pool_create(const unsigned char * const name,
+		unsigned int obj_size,
+		struct pool ** const pool_hdnl)
+{
+	struct pool *local_pool = NULL;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!name || !pool_hdnl) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	local_pool = kmalloc((sizeof(*local_pool)), GFP_KERNEL);
+	if (!local_pool) {
+		result = IMG_ERROR_MALLOC_FAILED;
+		return result;
+	}
+
+	local_pool->name = strdup_cust((unsigned char *)name);
+	local_pool->size = obj_size;
+	local_pool->buffers = NULL;
+	local_pool->objects = NULL;
+	local_pool->grow =
+		(BUFF_MAX_SIZE - sizeof(struct buffer)) /
+		(obj_size + ALIGN_SIZE);
+
+	if (local_pool->grow == 0)
+		local_pool->grow = 1;
+	else if (local_pool->grow > BUFF_MAX_GROW)
+		local_pool->grow = BUFF_MAX_GROW;
+
+	*pool_hdnl = local_pool;
+	result = IMG_SUCCESS;
+
+	return result;
+}
+
+/*
+ * @Function	pool_delete
+ * @Description
+ * Delete an sObject pool. All psObjects allocated from the pool must
+ * be free'd with pool_free() before deleting the sObject pool.
+ * @Input	pool : Object Pool pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_delete(struct pool * const pool_arg)
+{
+	struct buffer *local_buf = NULL;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!pool_arg) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	local_buf = pool_arg->buffers;
+	while (local_buf) {
+		local_buf = local_buf->next;
+		kfree(pool_arg->buffers);
+		pool_arg->buffers = local_buf;
+	}
+
+	kfree(pool_arg->name);
+	pool_arg->name = NULL;
+
+	kfree(pool_arg);
+	result = IMG_SUCCESS;
+
+	return result;
+}
+
+/*
+ * @Function	pool_alloc
+ * @Description
+ * Allocate an sObject from an sObject pool.
+ * @Input	pool_arg : Object Pool
+ * @Output	obj_hndl : Pointer containing the handle to the
+ * object created or IMG_NULL
+ * @Return    IMG_SUCCESS or an error code.
+ */
+int pool_alloc(struct pool * const pool_arg,
+	       void ** const obj_hndl)
+{
+	struct object *local_obj1 = NULL;
+	struct buffer *local_buf = NULL;
+	unsigned int idx = 0;
+	unsigned int sz = 0;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!pool_arg || !obj_hndl) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	if (!pool_arg->objects) {
+		/* buffer header plus "grow" aligned object slots */
+		sz = ((pool_arg->size + ALIGN_SIZE) * pool_arg->grow)
+			+ sizeof(struct buffer);
+		local_buf = kmalloc(sz, GFP_KERNEL);
+		if (!local_buf) {
+			result = IMG_ERROR_MALLOC_FAILED;
+			return result;
+		}
+
+		local_buf->next = pool_arg->buffers;
+		pool_arg->buffers = local_buf;
+
+		for (idx = 0; idx < pool_arg->grow; idx++) {
+			struct object *local_obj2;
+			unsigned char *temp_ptr = NULL;
+
+			local_obj2 = (struct object *)(((unsigned char *)(local_buf + 1))
+				+ (idx * (pool_arg->size + ALIGN_SIZE)));
+
+			temp_ptr = (unsigned char *)local_obj2;
+			if ((unsigned long)temp_ptr & ALIGN_SIZE) {
+				temp_ptr += ((ALIGN_SIZE + 1)
+					- ((unsigned long)temp_ptr & ALIGN_SIZE));
+				local_obj2 = (struct object *)temp_ptr;
+			}
+
+			local_obj2->next = pool_arg->objects;
+			pool_arg->objects = local_obj2;
+		}
+	}
+
+	if (!pool_arg->objects) {
+		result = IMG_ERROR_UNEXPECTED_STATE;
+		return result;
+	}
+
+	local_obj1 = pool_arg->objects;
+	pool_arg->objects = local_obj1->next;
+
+	*obj_hndl = (void *)(local_obj1);
+	result = IMG_SUCCESS;
+
+	return result;
+}
+
+/*
+ * @Function	pool_free
+ * @Description
+ * Free an sObject previously allocated from an sObject pool.
+ * @Input	pool_arg : Object Pool pointer.
+ * @Input	obj_hndl : Handle to the object to be freed.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int pool_free(struct pool * const pool_arg,
+	      void * const obj_hndl)
+{
+	struct object *object = NULL;
+	unsigned int result = IMG_ERROR_FATAL;
+
+	if (!pool_arg || !obj_hndl) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		return result;
+	}
+
+	object = (struct object *)obj_hndl;
+	object->next = pool_arg->objects;
+	pool_arg->objects = object;
+
+	result = IMG_SUCCESS;
+
+	return result;
+}
diff --git a/drivers/staging/media/vxd/common/pool.h b/drivers/staging/media/vxd/common/pool.h
new file mode 100644
index 000000000000..d22d15a2af54
--- /dev/null
+++ b/drivers/staging/media/vxd/common/pool.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Object Pool Memory Allocator header
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _pool_h_
+#define _pool_h_
+
+#include <linux/types.h>
+
+struct pool;
+
+/**
+ * pool_create - Create an sObject pool
+ * @name: Name of sObject pool for diagnostic purposes
+ * @obj_size: size of each sObject in the pool in bytes
+ * @pool: Will contain NULL or sObject pool handle
+ *
+ * Return	IMG_SUCCESS or an error code.
+ */
+int pool_create(const unsigned char * const name,
+		unsigned int obj_size,
+		struct pool ** const pool);
+
+/*
+ * @Function	pool_delete
+ * @Description
+ * Delete an sObject pool. All psObjects allocated from the pool must
+ * be free'd with pool_free() before deleting the sObject pool.
+ * @Input	pool : Object Pool pointer
+ * @Return IMG_SUCCESS or an error code.
+ */
+int pool_delete(struct pool * const pool);
+
+/*
+ * @Function	pool_alloc
+ * @Description
+ * Allocate an Object from an Object pool.
+ * @Input	pool : Object Pool
+ * @Output	obj_hdnl : Pointer containing the handle to the
+ * object created or IMG_NULL
+ * @Return    IMG_SUCCESS or an error code.
+ */
+int pool_alloc(struct pool * const pool,
+	       void ** const obj_hdnl);
+
+/*
+ * @Function	pool_free
+ * @Description
+ * Free an sObject previously allocated from an sObject pool.
+ * @Input	pool : Object Pool pointer.
+ * @Output	obj_hdnl : Handle to the object to be freed.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int pool_free(struct pool * const pool,
+	      void * const obj_hdnl);
+
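+/*
+ * Typical call sequence (illustrative sketch only; struct my_obj is
+ * hypothetical):
+ *
+ *	struct pool *p;
+ *	struct my_obj *obj;
+ *
+ *	pool_create((unsigned char *)"my-pool", sizeof(struct my_obj), &p);
+ *	pool_alloc(p, (void **)&obj);
+ *	...
+ *	pool_free(p, obj);
+ *	pool_delete(p);
+ */
+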
+#endif /* _pool_h_ */
diff --git a/drivers/staging/media/vxd/common/ra.c b/drivers/staging/media/vxd/common/ra.c
new file mode 100644
index 000000000000..ac07737f351b
--- /dev/null
+++ b/drivers/staging/media/vxd/common/ra.c
@@ -0,0 +1,972 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements generic resource allocation.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hash.h"
+#include "img_errors.h"
+#include "pool.h"
+#include "ra.h"
+
+static unsigned char	global_init;
+
+/* pool of struct arena's */
+static struct pool *global_pool_arena;
+
+/* pool of struct boundary tag */
+static struct pool *global_pool_bt;
+
+/**
+ * ra_request_alloc_fail - ra_request_alloc_fail
+ * @import_hdnl : Callback handle.
+ * @requested_size : Requested allocation size.
+ * @ref : Pointer to user reference data.
+ * @alloc_flags : Allocation flags.
+ * @actual_size : Pointer to contain the actual allocated size.
+ * @base_addr : Allocation base (always 0, since this allocator always fails).
+ *
+ * Default callback allocator used if no callback is specified, always fails
+ * to allocate further resources to the arena.
+ */
+static int ra_request_alloc_fail(void *import_hdnl,
+				 unsigned long long requested_size,
+				 unsigned long long *actual_size,
+				 void **ref,
+				 unsigned int alloc_flags,
+				 unsigned long long *base_addr)
+{
+	if (base_addr)
+		*base_addr = 0;
+
+	/* The default importer can never provide more resource */
+	return IMG_ERROR_OUT_OF_MEMORY;
+}
+
+/*
+ * @Function	ra_log2
+ * @Description
+ * Calculates floor(log2(n)) for a 64-bit value n.
+ *
+ * @Input	value : Input value.
+ * @Output	None
+ * @Return	result : log2(value), rounded down.
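+ *		For example, ra_log2(4096) == 12; since the result is
+ *		floor(log2), ra_log2(8191) == 12 as well.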
+ */
+
+static unsigned int ra_log2(unsigned long long value)
+{
+	int res = 0;
+
+	value >>= 1;
+	while (value > 0) {
+		value >>= 1;
+		res++;
+	}
+	return res;
+}
+
+/*
+ * @Function	ra_segment_list_insert_after
+ * @Description	Insert a boundary tag into an arena segment list after a
+ *		specified boundary tag.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_here_arg : The boundary tag after which
+ *		bt_to_insert_arg will be inserted.
+ * @Input	bt_to_insert_arg : The boundary tag to insert.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_segment_list_insert_after(struct arena *arena_arg,
+					 struct btag *bt_here_arg,
+					 struct btag *bt_to_insert_arg)
+{
+	bt_to_insert_arg->nxt_seg = bt_here_arg->nxt_seg;
+	bt_to_insert_arg->prv_seg = bt_here_arg;
+
+	if (!bt_here_arg->nxt_seg)
+		arena_arg->tail_seg = bt_to_insert_arg;
+	else
+		bt_here_arg->nxt_seg->prv_seg = bt_to_insert_arg;
+
+	bt_here_arg->nxt_seg = bt_to_insert_arg;
+}
+
+/*
+ * @Function	ra_segment_list_insert
+ * @Description
+ * Insert a boundary tag into an arena segment list at the appropriate point.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_to_insert_arg : The boundary tag to insert.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_segment_list_insert(struct arena *arena_arg,
+				   struct btag *bt_to_insert_arg)
+{
+	/* insert into the segment chain */
+	if (!arena_arg->head_seg) {
+		arena_arg->head_seg = bt_to_insert_arg;
+		arena_arg->tail_seg = bt_to_insert_arg;
+		bt_to_insert_arg->nxt_seg = NULL;
+		bt_to_insert_arg->prv_seg = NULL;
+	} else {
+		struct btag *bt_scan = arena_arg->head_seg;
+
+		while (bt_scan->nxt_seg &&
+		       bt_to_insert_arg->base >=
+		       bt_scan->nxt_seg->base) {
+			bt_scan = bt_scan->nxt_seg;
+		}
+		ra_segment_list_insert_after(arena_arg,
+					     bt_scan,
+					     bt_to_insert_arg);
+	}
+}
+
+/*
+ * @Function	ra_segment_list_remove
+ * @Description
+ * Remove a boundary tag from an arena segment list.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_to_remove_arg : The boundary tag to remove.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_segment_list_remove(struct arena *arena_arg,
+				   struct btag *bt_to_remove_arg)
+{
+	if (!bt_to_remove_arg->prv_seg)
+		arena_arg->head_seg = bt_to_remove_arg->nxt_seg;
+	else
+		bt_to_remove_arg->prv_seg->nxt_seg = bt_to_remove_arg->nxt_seg;
+
+	if (!bt_to_remove_arg->nxt_seg)
+		arena_arg->tail_seg = bt_to_remove_arg->prv_seg;
+	else
+		bt_to_remove_arg->nxt_seg->prv_seg = bt_to_remove_arg->prv_seg;
+}
+
+/*
+ * @Function	ra_segment_split
+ * @Description
+ * Split a segment into two, maintain the arena segment list.
+ * The boundary tag should not be in the free table. Neither the original
+ * nor the new neighbour boundary tag will be in the free table.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_to_split_arg : The boundary tag to split.
+ * @Input	size : The required size of bt_to_split_arg after the split.
+ * @Output	None
+ * @Return	btag *: New boundary tag.
+ */
+static struct btag *ra_segment_split(struct arena *arena_arg,
+				     struct btag *bt_to_split_arg,
+				     unsigned long long size)
+{
+	struct btag *local_bt_neighbour = NULL;
+	int res = IMG_ERROR_FATAL;
+
+	res = pool_alloc(global_pool_bt, ((void **)&local_bt_neighbour));
+	if (res != IMG_SUCCESS)
+		return NULL;
+
+	local_bt_neighbour->prv_seg = bt_to_split_arg;
+	local_bt_neighbour->nxt_seg = bt_to_split_arg->nxt_seg;
+	local_bt_neighbour->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+	local_bt_neighbour->size = (bt_to_split_arg->size - size);
+	local_bt_neighbour->base = (bt_to_split_arg->base + size);
+	local_bt_neighbour->nxt_free = NULL;
+	local_bt_neighbour->prv_free = NULL;
+	local_bt_neighbour->ref = bt_to_split_arg->ref;
+
+	if (!bt_to_split_arg->nxt_seg)
+		arena_arg->tail_seg = local_bt_neighbour;
+	else
+		bt_to_split_arg->nxt_seg->prv_seg = local_bt_neighbour;
+
+	bt_to_split_arg->nxt_seg = local_bt_neighbour;
+	bt_to_split_arg->size = size;
+
+	return local_bt_neighbour;
+}
+
+/*
+ * @Function	ra_free_list_insert
+ * @Description
+ * Insert a boundary tag into an arena free table.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_arg : The boundary tag to insert into an arena
+ *		free table.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_free_list_insert(struct arena *arena_arg,
+				struct btag *bt_arg)
+{
+	unsigned int index = ra_log2(bt_arg->size);
+
+	bt_arg->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+	bt_arg->prv_free = NULL;
+
+	/* Link the tag at the head of this bucket's free list */
+	if (index < FREE_TABLE_LIMIT) {
+		bt_arg->nxt_free = arena_arg->head_free[index];
+		if (arena_arg->head_free[index])
+			arena_arg->head_free[index]->prv_free = bt_arg;
+		arena_arg->head_free[index] = bt_arg;
+	} else {
+		bt_arg->nxt_free = NULL;
+	}
+}
+
+/*
+ * @Function	ra_free_list_remove
+ * @Description
+ * Remove a boundary tag from an arena free table.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_arg    : The boundary tag to remove from
+ *		an arena free table.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_free_list_remove(struct arena *arena_arg,
+				struct btag *bt_arg)
+{
+	unsigned int index = ra_log2(bt_arg->size);
+
+	if (bt_arg->nxt_free)
+		bt_arg->nxt_free->prv_free = bt_arg->prv_free;
+
+	if (!bt_arg->prv_free && index < FREE_TABLE_LIMIT)
+		arena_arg->head_free[index] = bt_arg->nxt_free;
+	else if (bt_arg->prv_free)
+		bt_arg->prv_free->nxt_free = bt_arg->nxt_free;
+}
+
+/*
+ * @Function	ra_build_span_marker
+ * @Description
+ * Construct a span marker boundary tag.
+ * @Input	base : The base of the boundary tag.
+ * @Output	None
+ * @Return    btag * : New span marker boundary tag
+ */
+static struct btag *ra_build_span_marker(unsigned long long base)
+{
+	struct btag *local_bt = NULL;
+	int res = IMG_ERROR_FATAL;
+
+	res = pool_alloc(global_pool_bt, ((void **)&local_bt));
+	if (res != IMG_SUCCESS)
+		return NULL;
+
+	local_bt->bt_type = RA_BOUNDARY_TAG_TYPE_SPAN;
+	local_bt->base = base;
+	local_bt->size = 0;
+	local_bt->nxt_seg = NULL;
+	local_bt->prv_seg = NULL;
+	local_bt->nxt_free = NULL;
+	local_bt->prv_free = NULL;
+	local_bt->ref = NULL;
+
+	return local_bt;
+}
+
+/*
+ * @Function	ra_build_bt
+ * @Description
+ * Construct a boundary tag for a free segment.
+ * @Input	base : The base of the resource segment.
+ * @Input	size : The extent of the resource segment.
+ * @Output	None
+ * @Return    btag *       : New boundary tag
+ */
+static struct btag *ra_build_bt(unsigned long long base, unsigned long long size)
+{
+	struct btag *local_bt = NULL;
+	int res = IMG_ERROR_FATAL;
+
+	res = pool_alloc(global_pool_bt, ((void **)&local_bt));
+
+	if (res != IMG_SUCCESS)
+		return local_bt;
+
+	local_bt->bt_type = RA_BOUNDARY_TAG_TYPE_FREE;
+	local_bt->base = base;
+	local_bt->size = size;
+	local_bt->nxt_seg = NULL;
+	local_bt->prv_seg = NULL;
+	local_bt->nxt_free = NULL;
+	local_bt->prv_free = NULL;
+	local_bt->ref = NULL;
+
+	return local_bt;
+}
+
+/*
+ * @Function	ra_insert_resource
+ * @Description
+ * Add a free resource segment to an arena.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	base : The base of the resource segment.
+ * @Input	size : The size of the resource segment.
+ * @Output	None
+ * @Return	IMG_SUCCESS or an error code.
+ */
+static int ra_insert_resource(struct arena *arena_arg,
+			      unsigned long long base,
+			      unsigned long long size)
+{
+	struct btag *local_bt = NULL;
+
+	local_bt = ra_build_bt(base, size);
+	if (!local_bt)
+		return IMG_ERROR_UNEXPECTED_STATE;
+
+	ra_segment_list_insert(arena_arg, local_bt);
+	ra_free_list_insert(arena_arg, local_bt);
+	arena_arg->max_idx = ra_log2(size);
+	if (1ULL << arena_arg->max_idx < size)
+		arena_arg->max_idx++;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	ra_insert_resource_span
+ * @Description
+ * Add a free resource span to an arena, complete with span markers.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	base : The base of the resource segment.
+ * @Input	size : The size of the resource segment.
+ * @Output	None
+ * @Return	btag * : The boundary tag representing
+ *		the free resource segment.
+ */
+static struct btag *ra_insert_resource_span(struct arena *arena_arg,
+					    unsigned long long base,
+					    unsigned long long size)
+{
+	struct btag *local_bt = NULL;
+	struct btag *local_bt_span_start = NULL;
+	struct btag *local_bt_span_end = NULL;
+
+	local_bt_span_start = ra_build_span_marker(base);
+	if (!local_bt_span_start)
+		return NULL;
+
+	local_bt_span_end = ra_build_span_marker(base + size);
+	if (!local_bt_span_end) {
+		pool_free(global_pool_bt, local_bt_span_start);
+		return NULL;
+	}
+
+	local_bt = ra_build_bt(base, size);
+	if (!local_bt) {
+		pool_free(global_pool_bt, local_bt_span_end);
+		pool_free(global_pool_bt, local_bt_span_start);
+		return NULL;
+	}
+
+	ra_segment_list_insert(arena_arg, local_bt_span_start);
+	ra_segment_list_insert_after(arena_arg,
+				     local_bt_span_start,
+				     local_bt);
+	ra_free_list_insert(arena_arg, local_bt);
+	ra_segment_list_insert_after(arena_arg,
+				     local_bt,
+				     local_bt_span_end);
+
+	return local_bt;
+}
+
+/*
+ * @Function	ra_free_bt
+ * @Description
+ * Free a boundary tag taking care of the segment list and the
+ *		boundary tag free table.
+ * @Input	arena_arg : Pointer to the input arena.
+ * @Input	bt_arg : The boundary tag to free.
+ * @Output	None
+ * @Return	None
+ */
+static void ra_free_bt(struct arena *arena_arg,
+		       struct btag *bt_arg)
+{
+	struct btag *bt_neibr;
+
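+	/*
+	 * Coalescing example (illustrative): freeing [0x2000,0x3000)
+	 * while [0x1000,0x2000) is already free yields a single free
+	 * segment covering [0x1000,0x3000).
+	 */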
+	/* try and coalesce with left bt_neibr */
+	bt_neibr = bt_arg->prv_seg;
+	if (bt_neibr &&
+	    bt_neibr->bt_type == RA_BOUNDARY_TAG_TYPE_FREE &&
+	    bt_neibr->base + bt_neibr->size == bt_arg->base) {
+		ra_free_list_remove(arena_arg, bt_neibr);
+		ra_segment_list_remove(arena_arg, bt_neibr);
+		bt_arg->base = bt_neibr->base;
+		bt_arg->size += bt_neibr->size;
+		pool_free(global_pool_bt, bt_neibr);
+	}
+
+	/* try to coalesce with the right bt_neibr */
+	bt_neibr = bt_arg->nxt_seg;
+	if (bt_neibr &&
+	    bt_neibr->bt_type == RA_BOUNDARY_TAG_TYPE_FREE &&
+	    bt_arg->base + bt_arg->size == bt_neibr->base) {
+		ra_free_list_remove(arena_arg, bt_neibr);
+		ra_segment_list_remove(arena_arg, bt_neibr);
+		bt_arg->size += bt_neibr->size;
+		pool_free(global_pool_bt, bt_neibr);
+	}
+
+	if (bt_arg->nxt_seg &&
+	    bt_arg->nxt_seg->bt_type == RA_BOUNDARY_TAG_TYPE_SPAN &&
+	    bt_arg->prv_seg && bt_arg->prv_seg->bt_type ==
+	    RA_BOUNDARY_TAG_TYPE_SPAN) {
+		struct btag *ps_bt_nxt = bt_arg->nxt_seg;
+		struct btag *ps_bt_prev = bt_arg->prv_seg;
+
+		ra_segment_list_remove(arena_arg, ps_bt_nxt);
+		ra_segment_list_remove(arena_arg, ps_bt_prev);
+		ra_segment_list_remove(arena_arg, bt_arg);
+		arena_arg->import_free_fxn(arena_arg->import_hdnl,
+					   bt_arg->base,
+					   bt_arg->ref);
+		pool_free(global_pool_bt, ps_bt_nxt);
+		pool_free(global_pool_bt, ps_bt_prev);
+		pool_free(global_pool_bt, bt_arg);
+	} else {
+		ra_free_list_insert(arena_arg, bt_arg);
+	}
+}
+
+static int ra_check_btag(struct arena *arena_arg,
+			 unsigned long long size_arg,
+			 void **ref,
+			 struct btag *bt_arg,
+			 unsigned long long align_arg,
+			 unsigned long long *base_arg,
+			 unsigned int align_log2)
+{
+	unsigned long long local_align_base;
+	int res = IMG_ERROR_FATAL;
+
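+	/*
+	 * Walk the free list for the first tag whose aligned base still
+	 * leaves room for size_arg bytes. For example, with
+	 * base == 0x1003 and align_arg == 0x1000 (align_log2 == 12),
+	 * local_align_base is rounded up to 0x2000.
+	 */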
+	while (bt_arg) {
+		if (align_arg > 1ULL)
+			local_align_base = ((bt_arg->base + align_arg - 1)
+				>> align_log2) << align_log2;
+		else
+			local_align_base = bt_arg->base;
+
+		if ((bt_arg->base + bt_arg->size) >=
+		     (local_align_base + size_arg)) {
+			ra_free_list_remove(arena_arg, bt_arg);
+
+			/*
+			 * with align_arg we might need to discard the front of
+			 * this segment
+			 */
+			if (local_align_base > bt_arg->base) {
+				struct btag *btneighbor;
+
+				btneighbor = ra_segment_split(arena_arg,
+							      bt_arg,
+							      (local_align_base -
+							       bt_arg->base));
+				/*
+				 * Partition the buffer, create a new boundary
+				 * tag
+				 */
+				if (!btneighbor)
+					return IMG_ERROR_UNEXPECTED_STATE;
+
+				ra_free_list_insert(arena_arg, bt_arg);
+				bt_arg = btneighbor;
+			}
+
+			/*
+			 * The segment might be too big, if so, discard the back
+			 * of the segment
+			 */
+			if (bt_arg->size > size_arg) {
+				struct btag *btneighbor;
+
+				btneighbor = ra_segment_split(arena_arg,
+							      bt_arg,
+							      size_arg);
+				/*
+				 * Partition the buffer, create a new boundary
+				 * tag
+				 */
+				if (!btneighbor)
+					return IMG_ERROR_UNEXPECTED_STATE;
+
+				ra_free_list_insert(arena_arg, btneighbor);
+			}
+
+			bt_arg->bt_type = RA_BOUNDARY_TAG_TYPE_LIVE;
+
+			res = vid_hash_insert(arena_arg->hash_tbl,
+					      bt_arg->base,
+					      (unsigned long)bt_arg);
+			if (res != IMG_SUCCESS) {
+				ra_free_bt(arena_arg, bt_arg);
+				*base_arg = 0;
+				return IMG_ERROR_UNEXPECTED_STATE;
+			}
+
+			if (ref)
+				*ref = bt_arg->ref;
+
+			*base_arg = bt_arg->base;
+			return IMG_SUCCESS;
+		}
+		bt_arg = bt_arg->nxt_free;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	ra_attempt_alloc_aligned
+ * @Description	Attempt to allocate from an arena
+ * @Input	arena_arg: Pointer to the input arena
+ * @Input	size_arg: The requested allocation size
+ * @Output	ref: Returns the user reference associated with the
+ *		allocated segment
+ * @Input	align_arg: Required alignment
+ * @Output	base_arg: Base address of the allocated resource
+ * @Return	IMG_SUCCESS or an error code
+ */
+static int ra_attempt_alloc_aligned(struct arena *arena_arg,
+				    unsigned long long size_arg,
+				    void **ref,
+				    unsigned long long align_arg,
+				    unsigned long long *base_arg)
+{
+	unsigned int index;
+	unsigned int align_log2;
+	int res = IMG_ERROR_FATAL;
+
+	if (!arena_arg || !base_arg)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/*
+	 * Take the log of the alignment to get the number of bits to
+	 * shift left/right for multiply/divide. The alignment is
+	 * assumed to be a power-of-2 value.
+	 */
+	align_log2 = ra_log2(align_arg);
+
+	/*
+	 * Search for a near fit free boundary tag, start looking at the
+	 * log2 free table for our required size and work on up the table.
+	 */
+	index = ra_log2(size_arg);
+
+	/*
+	 * If the size required is exactly 2**n then use bucket n, because
+	 * we know that every free block in that bucket is at least 2**n
+	 * bytes; otherwise start at the next bucket up.
+	 */
+	if (size_arg > (1ull << index))
+		index++;
+
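+	/*
+	 * For example, a request of 0x1800 bytes gives index 12 above,
+	 * and since 0x1800 > (1 << 12) the search starts at bucket 13.
+	 */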
+	while ((index < FREE_TABLE_LIMIT) && !arena_arg->head_free[index])
+		index++;
+
+	if (index >= FREE_TABLE_LIMIT) {
+		pr_err("requested allocation size doesn't fit in the arena. Increase MMU HEAP Size\n");
+		return IMG_ERROR_OUT_OF_MEMORY;
+	}
+
+	while (index < FREE_TABLE_LIMIT) {
+		if (arena_arg->head_free[index]) {
+			/* we have a cached free boundary tag */
+			struct btag *local_bt =
+				arena_arg->head_free[index];
+
+			res = ra_check_btag(arena_arg,
+					    size_arg,
+					    ref,
+					    local_bt,
+					    align_arg,
+					    base_arg,
+					    align_log2);
+			/*
+			 * Stop at the first fit; on failure fall through
+			 * and retry with the next (larger) bucket.
+			 */
+			if (res == IMG_SUCCESS)
+				return IMG_SUCCESS;
+		}
+		index++;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_initialise
+ * @Description	Initializes the RA module. Must be called before any other
+ *		ra API function
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_initialise(void)
+{
+	int res = IMG_ERROR_FATAL;
+
+	if (!global_init) {
+		res = pool_create("img-arena",
+				  sizeof(struct arena),
+				  &global_pool_arena);
+		if (res != IMG_SUCCESS)
+			return IMG_ERROR_UNEXPECTED_STATE;
+
+		res = pool_create("img-bt",
+				  sizeof(struct btag),
+				  &global_pool_bt);
+		if (res != IMG_SUCCESS) {
+			res = pool_delete(global_pool_arena);
+			global_pool_arena = NULL;
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+		global_init = 1;
+		res = IMG_SUCCESS;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_deinit
+ * @Description	Deinitializes the RA module
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_deinit(void)
+{
+	int res = IMG_ERROR_FATAL;
+
+	if (global_init) {
+		if (global_pool_arena) {
+			res = pool_delete(global_pool_arena);
+			global_pool_arena = NULL;
+		}
+		if (global_pool_bt) {
+			res = pool_delete(global_pool_bt);
+			global_pool_bt = NULL;
+		}
+		global_init = 0;
+		res = IMG_SUCCESS;
+	}
+	return res;
+}
+
+/*
+ * @Function	vid_ra_create
+ * @Description	Used to create a resource arena.
+ * @Input	name: The name of the arena for diagnostic purposes
+ * @Input	base_arg: The base of an initial resource span or 0
+ * @Input	size_arg: The size of an initial resource span or 0
+ * @Input	quantum: The arena allocation quantum
+ * @Input	(*import_alloc_fxn): A resource allocation callback or NULL
+ * @Input	(*import_free_fxn): A resource de-allocation callback or NULL
+ * @Input	import_hdnl: Handle passed to alloc and free or NULL
+ * @Output	arena_hndl: The handle for the arena being created, or NULL
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_create(const unsigned char * const name,
+		  unsigned long long base_arg,
+		  unsigned long long size_arg,
+		  unsigned long	quantum,
+		  int (*import_alloc_fxn)(void * const import_hdnl,
+					  unsigned long long req_sz,
+					  unsigned long long * const actl_sz,
+					  void ** const ref,
+					  unsigned int alloc_flags,
+					  unsigned long long * const base_arg),
+		  int (*import_free_fxn)(void * const import_hdnl,
+					 unsigned long long import_base,
+					 void * const import_ref),
+					 void *import_hdnl,
+					 void **arena_hndl)
+{
+	struct arena *local_arena = NULL;
+	unsigned int idx = 0;
+	int res = IMG_ERROR_FATAL;
+
+	if (!arena_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	*(arena_hndl) = NULL;
+
+	if (global_init) {
+		res = pool_alloc(global_pool_arena, ((void **)&local_arena));
+		if (!local_arena || res != IMG_SUCCESS)
+			return IMG_ERROR_UNEXPECTED_STATE;
+
+		local_arena->name = NULL;
+		if (name)
+			local_arena->name = kstrdup((const char *)name,
+						    GFP_KERNEL);
+		if (import_alloc_fxn)
+			local_arena->import_alloc_fxn = import_alloc_fxn;
+		else
+			local_arena->import_alloc_fxn = ra_request_alloc_fail;
+
+		local_arena->import_free_fxn = import_free_fxn;
+		local_arena->import_hdnl = import_hdnl;
+
+		for (idx = 0; idx < FREE_TABLE_LIMIT; idx++)
+			local_arena->head_free[idx] = NULL;
+
+		local_arena->head_seg = NULL;
+		local_arena->tail_seg = NULL;
+		local_arena->quantum = quantum;
+
+		res = vid_hash_create(MINIMUM_HASH_SIZE,
+				      &local_arena->hash_tbl);
+		if (res != IMG_SUCCESS || !local_arena->hash_tbl) {
+			kfree(local_arena->name);
+			pool_free(global_pool_arena, local_arena);
+			return IMG_ERROR_UNEXPECTED_STATE;
+		}
+
+		if (size_arg > 0ULL) {
+			/* Round the initial span up to a whole number of quanta */
+			size_arg = (size_arg + quantum - 1) / quantum * quantum;
+
+			res = ra_insert_resource(local_arena,
+						 base_arg,
+						 size_arg);
+			if (res != IMG_SUCCESS) {
+				vid_hash_delete(local_arena->hash_tbl);
+				kfree(local_arena->name);
+				pool_free(global_pool_arena, local_arena);
+				return IMG_ERROR_UNEXPECTED_STATE;
+			}
+		}
+		*(arena_hndl) = local_arena;
+		res = IMG_SUCCESS;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_delete
+ * @Description	Used to delete a resource arena. All resources allocated from
+ *		the arena must be freed before deleting the arena
+ * @Input	arena_hndl: The handle to the arena to delete
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_delete(void * const arena_hndl)
+{
+	int res = IMG_ERROR_FATAL;
+	struct arena *local_arena = NULL;
+	unsigned int idx;
+
+	if (!arena_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (global_init) {
+		local_arena = (struct arena *)arena_hndl;
+		kfree(local_arena->name);
+		local_arena->name = NULL;
+		for (idx = 0; idx < FREE_TABLE_LIMIT; idx++)
+			local_arena->head_free[idx] = NULL;
+
+		while (local_arena->head_seg) {
+			struct btag *local_bt = local_arena->head_seg;
+
+			ra_segment_list_remove(local_arena, local_bt);
+			/* return the boundary tag to the global pool */
+			pool_free(global_pool_bt, local_bt);
+		}
+		res = vid_hash_delete(local_arena->hash_tbl);
+		if (res != IMG_SUCCESS)
+			return IMG_ERROR_UNEXPECTED_STATE;
+
+		res = pool_free(global_pool_arena, local_arena);
+		if (res != IMG_SUCCESS)
+			return IMG_ERROR_UNEXPECTED_STATE;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_add
+ * @Description	Used to add a resource span to an arena. The span must not
+ *		overlap with any span previously added to the arena
+ * @Input	arena_hndl: The handle to the arena to add the span to
+ * @Input	base_arg: The base of the span
+ * @Input	size_arg: The size of the span
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_add(void * const arena_hndl, unsigned long long base_arg, unsigned long long size_arg)
+{
+	int res = IMG_ERROR_FATAL;
+	struct arena *local_arena = NULL;
+
+	if (!arena_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (global_init) {
+		local_arena = (struct arena *)arena_hndl;
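+		/*
+		 * Round the span up to a whole number of quanta, e.g. a
+		 * 5000-byte span with a 4096-byte quantum becomes 8192.
+		 */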
+		size_arg = (size_arg + local_arena->quantum - 1) /
+			local_arena->quantum * local_arena->quantum;
+
+		res = ra_insert_resource(local_arena, base_arg, size_arg);
+		if (res != IMG_SUCCESS)
+			return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_alloc
+ * @Description	Used to allocate resource from an arena
+ * @Input	arena_hndl: The handle to the arena to allocate from
+ * @Input	request_size: The requested size of the resource segment
+ * @Output	actl_sz: The actual (quantum-rounded) size of the segment
+ * @Output	ref: Returns the user reference associated with the
+ *		allocated resource span
+ * @Input	alloc_flags: Allocation flags influencing the allocation policy
+ * @Input	align_arg: The alignment constraint required for the allocated
+ *		segment
+ * @Output	base_arg: The base of the allocated resource
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_alloc(void * const arena_hndl,
+		 unsigned long long request_size,
+		 unsigned long long * const actl_sz,
+		 void ** const ref,
+		 unsigned int alloc_flags,
+		 unsigned long long alignarg,
+		 unsigned long long * const basearg)
+{
+	int res = IMG_ERROR_FATAL;
+	struct arena *arn_ctx = NULL;
+	unsigned long long loc_size = request_size;
+
+	if (!arena_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (global_init) {
+		arn_ctx = (struct arena *)arena_hndl;
+		loc_size = ((loc_size + arn_ctx->quantum - 1) /
+				arn_ctx->quantum) * arn_ctx->quantum;
+
+		if (actl_sz)
+			*actl_sz = loc_size;
+
+		/*
+		 * If allocation failed then we might have an import source
+		 * which can provide more resource, else we will have to fail
+		 * the allocation to the caller.
+		 */
+		if (alloc_flags == RA_SEQUENTIAL_ALLOCATION)
+			res = ra_attempt_alloc_aligned(arn_ctx,
+						       loc_size,
+						       ref,
+						       alignarg,
+						       basearg);
+
+		if (res != IMG_SUCCESS) {
+			void *import_ref = NULL;
+			unsigned long long import_base = 0ULL;
+			unsigned long long locimprt_reqsz = loc_size;
+			unsigned long long locimprt_actsz = 0ULL;
+
+			res = arn_ctx->import_alloc_fxn(arn_ctx->import_hdnl,
+					locimprt_reqsz,
+					&locimprt_actsz,
+					&import_ref,
+					alloc_flags,
+					&import_base);
+
+			if (res == IMG_SUCCESS) {
+				struct btag *local_bt =
+					ra_insert_resource_span(arn_ctx,
+								import_base,
+								locimprt_actsz);
+
+				/*
+				 * Successfully import more resource, create a
+				 * span to represent it and retry the allocation
+				 * attempt
+				 */
+				if (!local_bt) {
+					/*
+					 * Insufficient resources to insert the
+					 * newly acquired span, so free it back
+					 */
+					arn_ctx->import_free_fxn(arn_ctx->import_hdnl,
+							import_base,
+							import_ref);
+					return IMG_ERROR_UNEXPECTED_STATE;
+				}
+				local_bt->ref = import_ref;
+				if (alloc_flags == RA_SEQUENTIAL_ALLOCATION) {
+					res = ra_attempt_alloc_aligned(arn_ctx,
+								       loc_size,
+								       ref,
+								       alignarg,
+								       basearg);
+				}
+			}
+		}
+	}
+
+	return res;
+}
+
+/*
+ * @Function	vid_ra_free
+ * @Description	Used to free a resource segment
+ * @Input	arena_hndl: The arena the segment was originally allocated from
+ * @Input	base_arg: The base of the span
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_free(void * const arena_hndl, unsigned long long base_arg)
+{
+	int res = IMG_ERROR_FATAL;
+	struct arena *local_arena = NULL;
+	struct btag *local_bt = NULL;
+	unsigned long	uip_res;
+
+	if (!arena_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (global_init) {
+		local_arena = (struct arena *)arena_hndl;
+
+		res = vid_hash_remove(local_arena->hash_tbl,
+				      base_arg,
+				      &uip_res);
+		if (res != IMG_SUCCESS)
+			return res;
+		local_bt = (struct btag *)uip_res;
+
+		ra_free_bt(local_arena, local_bt);
+	}
+
+	return res;
+}
diff --git a/drivers/staging/media/vxd/common/ra.h b/drivers/staging/media/vxd/common/ra.h
new file mode 100644
index 000000000000..a4d529d635d7
--- /dev/null
+++ b/drivers/staging/media/vxd/common/ra.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Implements generic resource allocation.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *      Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef _RA_H_
+#define _RA_H_
+
+#define MINIMUM_HASH_SIZE  (64)
+#define FREE_TABLE_LIMIT   (64)
+
+/* Defines whether sequential or random allocation is used */
+enum {
+	RA_SEQUENTIAL_ALLOCATION = 0,
+	RA_RANDOM_ALLOCATION,
+	RA_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Defines boundary tag type */
+enum eboundary_tag_type {
+	RA_BOUNDARY_TAG_TYPE_SPAN = 0,
+	RA_BOUNDARY_TAG_TYPE_FREE,
+	RA_BOUNDARY_TAG_TYPE_LIVE,
+	RA_BOUNDARY_TAG_TYPE_MAX,
+	RA_BOUNDARY_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * @Description
+ * Boundary tags, used to describe a resource segment
+ *
+ * @bt_type: the tag type: span marker, free segment or allocated
+ *		(live) segment, see enum eboundary_tag_type
+ * @base: the base resource of this segment
+ * @size: the extent of this segment
+ * @nxt_seg, prv_seg: doubly linked ordered list of all segments
+ *			within the arena
+ * @nxt_free, prv_free: doubly linked un-ordered list of free segments
+ * @ref: a user reference associated with this span; user
+ *		references are currently only provided through
+ *		the import callback mechanism
+ */
+struct btag {
+	unsigned int bt_type;
+	unsigned long long base;
+	unsigned long long size;
+	struct btag *nxt_seg;
+	struct btag *prv_seg;
+	struct btag *nxt_free;
+	struct btag *prv_free;
+	void *ref;
+};
+
+/*
+ * @Description
+ * resource allocation arena
+ *
+ * @name: arena name, used for diagnostic output
+ * @quantum: allocations within this arena are quantum sized
+ * @max_idx: index of the last position in the psBTHeadFree table,
+ *		with available free space
+ * @import_alloc_fxn: import interface, if provided
+ * @import_free_fxn: import interface, if provided
+ * @import_hdnl: import interface, if provided
+ * @head_free: heads of the free boundary tag lists, indexed by log2
+ *		of the boundary tag size. Power-of-two table of free lists
+ * @head_seg, tail_seg : resource ordered segment list
+ * @hash_tbl : segment address to boundary tag hash table
+ */
+struct arena {
+	unsigned char *name;
+	unsigned long quantum;
+	unsigned int max_idx;
+	int (*import_alloc_fxn)(void *import_hdnl,
+				unsigned long long requested_size,
+				unsigned long long *actual_size,
+				void **ref,
+				unsigned int alloc_flags,
+				unsigned long long *base_addr);
+	int (*import_free_fxn)(void *import_hdnl,
+			       unsigned long long base,
+			       void *ref);
+	void *import_hdnl;
+	struct btag *head_free[FREE_TABLE_LIMIT];
+	struct btag *head_seg;
+	struct btag *tail_seg;
+	struct hash *hash_tbl;
+};
+
+/*
+ * @Function	vid_ra_initialise
+ * @Description	Initializes the RA module. Must be called before any other
+ *		ra API function
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_initialise(void);
+
+/*
+ * @Function	vid_ra_deinit
+ * @Description	Deinitializes the RA module
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_deinit(void);
+
+/*
+ * @Function	vid_ra_create
+ * @Description	Used to create a resource arena.
+ * @Input	name: The name of the arena for diagnostic purposes
+ * @Input	base_arg: The base of an initial resource span or 0
+ * @Input	size_arg: The size of an initial resource span or 0
+ * @Input	quantum: The arena allocation quantum
+ * @Input	(*import_alloc_fxn): A resource allocation callback or NULL
+ * @Input	(*import_free_fxn): A resource de-allocation callback or NULL
+ * @Input	import_hdnl: Handle passed to alloc and free or NULL
+ * @Output	arena_hndl: The handle for the arena being created, or NULL
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_create(const unsigned char * const name,
+		  unsigned long long base_arg,
+		  unsigned long long size_arg,
+		  unsigned long quantum,
+		  int (*import_alloc_fxn)(void * const import_hdnl,
+					  unsigned long long req_sz,
+					  unsigned long long * const actl_sz,
+					  void ** const ref,
+					  unsigned int alloc_flags,
+					  unsigned long long * const base_arg),
+		  int (*import_free_fxn)(void * const import_hdnl,
+					 unsigned long long import_base,
+					 void * const import_ref),
+		  void *import_hdnl,
+		  void **arena_hndl);
+
+/*
+ * @Function	vid_ra_delete
+ * @Description	Used to delete a resource arena. All resources allocated from
+ *		the arena must be freed before deleting the arena
+ * @Input	arena_hndl: The handle to the arena to delete
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_delete(void * const arena_hndl);
+
+/*
+ * @Function	vid_ra_add
+ * @Description	Used to add a resource span to an arena. The span must not
+ *		overlap with any span previously added to the arena
+ * @Input	arena_hndl: The handle to the arena to add the span to
+ * @Input	base_arg: The base of the span
+ * @Input	size_arg: The size of the span
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_add(void * const arena_hndl, unsigned long long base_arg, unsigned long long size_arg);
+
+/*
+ * @Function	vid_ra_alloc
+ * @Description	Used to allocate resource from an arena
+ * @Input	arena_hndl: The handle to the arena to allocate from
+ * @Input	request_size: The requested size of the resource segment
+ * @Output	actl_sz: The actual (quantum-rounded) size of the segment
+ * @Output	ref: Returns the user reference associated with the
+ *		allocated resource span
+ * @Input	alloc_flags: Allocation flags influencing the allocation policy
+ * @Input	align_arg: The alignment constraint required for the allocated
+ *		segment
+ * @Output	base_arg: The base of the allocated resource
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_alloc(void * const arena_hndl,
+		 unsigned long long request_size,
+		 unsigned long long * const actl_sz,
+		 void ** const ref,
+		 unsigned int alloc_flags,
+		 unsigned long long align_arg,
+		 unsigned long long * const base_arg);
+
+/*
+ * @Function	vid_ra_free
+ * @Description	Used to free a resource segment
+ * @Input	arena_hndl: The arena the segment was originally allocated from
+ * @Input	base_arg: The base of the span
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int vid_ra_free(void * const arena_hndl, unsigned long long base_arg);
+
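+/*
+ * Typical call sequence (illustrative sketch only; the base, size,
+ * quantum and alignment values below are examples, and error handling
+ * is omitted):
+ *
+ *	void *arena;
+ *	unsigned long long base;
+ *
+ *	vid_ra_initialise();
+ *	vid_ra_create("mmu-heap", 0x00100000ULL, 0x00400000ULL, 4096,
+ *		      NULL, NULL, NULL, &arena);
+ *	vid_ra_alloc(arena, 0x1000ULL, NULL, NULL,
+ *		     RA_SEQUENTIAL_ALLOCATION, 0x1000ULL, &base);
+ *	...
+ *	vid_ra_free(arena, base);
+ *	vid_ra_delete(arena);
+ *	vid_ra_deinit();
+ */
+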
+#endif
diff --git a/drivers/staging/media/vxd/common/talmmu_api.c b/drivers/staging/media/vxd/common/talmmu_api.c
new file mode 100644
index 000000000000..04ddcc33505c
--- /dev/null
+++ b/drivers/staging/media/vxd/common/talmmu_api.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TAL MMU Extensions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "lst.h"
+#include "talmmu_api.h"
+
+static int global_init;
+static struct lst_t gl_dmtmpl_lst = {0};
+static struct mutex *global_lock;
+
+static int talmmu_devmem_free(void *mem_hndl)
+{
+	struct talmmu_memory *mem = mem_hndl;
+	struct talmmu_devmem_heap *mem_heap;
+
+	if (!mem_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	mem_heap = mem->devmem_heap;
+
+	if (!mem->ext_dev_virtaddr)
+		addr_cx_free(&mem_heap->ctx, "", mem->dev_virtoffset);
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+	lst_remove(&mem_heap->memory_list, mem);
+
+	mutex_unlock(global_lock);
+
+	kfree(mem);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * talmmu_devmem_heap_empty - talmmu_devmem_heap_empty
+ * @devmem_heap_hndl: device memory heap handle
+ *
+ * This function is used for emptying the device memory heap list
+ */
+int talmmu_devmem_heap_empty(void *devmem_heap_hndl)
+{
+	struct talmmu_devmem_heap *devmem_heap = devmem_heap_hndl;
+
+	if (!devmem_heap)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	while (!lst_empty(&devmem_heap->memory_list))
+		talmmu_devmem_free(lst_first(&devmem_heap->memory_list));
+
+	addr_cx_deinitialise(&devmem_heap->ctx);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_heap_destroy
+ *
+ * @Description	This function is used for freeing the device memory heap
+ *
+ * @Input	devmem_heap_hndl: device memory heap handle
+ *
+ * @Return	None
+ *
+ */
+static void talmmu_devmem_heap_destroy(void *devmem_heap_hndl)
+{
+	struct talmmu_devmem_heap *devmem_heap = devmem_heap_hndl;
+
+	talmmu_devmem_heap_empty(devmem_heap_hndl);
+	kfree(devmem_heap);
+}
+
+/*
+ * @Function	talmmu_init
+ *
+ * @Description	This function is used to initialize the TALMMU component.
+ *
+ * @Input	None.
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_init(void)
+{
+	if (!global_init) {
+		/* If no mutex associated with this resource */
+		if (!global_lock) {
+			/* Create one */
+			global_lock = kzalloc(sizeof(*global_lock), GFP_KERNEL);
+			if (!global_lock)
+				return IMG_ERROR_OUT_OF_MEMORY;
+
+			mutex_init(global_lock);
+		}
+
+		lst_init(&gl_dmtmpl_lst);
+		global_init = 1;
+	}
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_deinit
+ *
+ * @Description	This function is used to de-initialize the TALMMU component.
+ *
+ * @Input	None.
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_deinit(void)
+{
+	struct talmmu_dm_tmpl *t;
+
+	if (global_init) {
+		while (!lst_empty(&gl_dmtmpl_lst)) {
+			t = (struct talmmu_dm_tmpl *)lst_first(&gl_dmtmpl_lst);
+			talmmu_devmem_template_destroy((void *)t);
+		}
+		mutex_destroy(global_lock);
+		kfree(global_lock);
+		global_lock = NULL;
+		global_init = 0;
+	}
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_template_create
+ *
+ * @Description	This function is used to create a device memory template
+ *
+ * @Input	devmem_info:  A pointer to a talmmu_devmem_info structure.
+ *
+ * @Output	devmem_template_hndl: A pointer used to return the template
+ *		handle
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_template_create(struct talmmu_devmem_info *devmem_info,
+				  void **devmem_template_hndl)
+{
+	struct talmmu_dm_tmpl *devmem_template;
+
+	if (!devmem_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	devmem_template = kzalloc(sizeof(*devmem_template), GFP_KERNEL);
+	if (!devmem_template)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	devmem_template->devmem_info = *devmem_info;
+
+	lst_init(&devmem_template->devmem_ctx_list);
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
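+	/*
+	 * Fixed 4KB-page, two-level MMU geometry: 12-bit offset within a
+	 * page, 1024 32-bit entries per page table (10-bit index, 0x3FF
+	 * mask) and a 22-bit shift down to the page-directory index.
+	 */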
+	devmem_template->page_num_shift = 12;
+	devmem_template->byte_in_pagemask = 0xFFF;
+	devmem_template->heap_alignment = 0x400000;
+	devmem_template->pagetable_entries_perpage =
+		(devmem_template->devmem_info.page_size / sizeof(unsigned int));
+	devmem_template->pagetable_num_shift = 10;
+	devmem_template->index_in_pagetable_mask = 0x3FF;
+	devmem_template->pagedir_num_shift = 22;
+
+	lst_add(&gl_dmtmpl_lst, devmem_template);
+
+	mutex_unlock(global_lock);
+
+	*devmem_template_hndl = devmem_template;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_template_destroy
+ *
+ * @Description This function is used to obtain the template from the list and
+ *		destroy
+ *
+ * @Input	devmem_tmplt_hndl: Device memory template handle
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_template_destroy(void *devmem_tmplt_hndl)
+{
+	struct talmmu_dm_tmpl *dm_tmpl = devmem_tmplt_hndl;
+	unsigned int i;
+
+	if (!devmem_tmplt_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	while (!lst_empty(&dm_tmpl->devmem_ctx_list))
+		talmmu_devmem_ctx_destroy(lst_first(&dm_tmpl->devmem_ctx_list));
+
+	for (i = 0; i < dm_tmpl->num_heaps; i++)
+		talmmu_devmem_heap_destroy(dm_tmpl->devmem_heap[i]);
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+	lst_remove(&gl_dmtmpl_lst, dm_tmpl);
+
+	mutex_unlock(global_lock);
+
+	kfree(dm_tmpl);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_create_heap
+ *
+ * @Description	This function is used to create a device memory heap
+ *
+ * @Input	devmem_tmplt_hndl: device memory template handle
+ * @Input	heap_info_arg: pointer to the heap info structure
+ * @Input	isfull: if set, initialise a full address context for the heap
+ *
+ * @Output	devmem_heap_arg: returns the new device memory heap
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+static int talmmu_create_heap(void *devmem_tmplt_hndl,
+			      struct talmmu_heap_info *heap_info_arg,
+			      unsigned char isfull,
+			      struct talmmu_devmem_heap **devmem_heap_arg)
+{
+	struct talmmu_dm_tmpl *devmem_template = devmem_tmplt_hndl;
+	struct talmmu_devmem_heap *devmem_heap;
+
+	/* Allocating memory for device memory heap */
+	devmem_heap = kzalloc(sizeof(*devmem_heap), GFP_KERNEL);
+	if (!devmem_heap)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	/*
+	 * Update the device memory heap structure members
+	 * Update the device memory template
+	 */
+	devmem_heap->devmem_template = devmem_template;
+	/* Update the device memory heap information */
+	devmem_heap->heap_info = *heap_info_arg;
+
+	/* Initialize the device memory heap list */
+	lst_init(&devmem_heap->memory_list);
+
+	/* If full structure required */
+	if (isfull) {
+		addr_cx_initialise(&devmem_heap->ctx);
+		devmem_heap->regions.base_addr = 0;
+		devmem_heap->regions.size = devmem_heap->heap_info.size;
+		addr_cx_define_mem_region(&devmem_heap->ctx,
+					  &devmem_heap->regions);
+	}
+
+	*devmem_heap_arg = devmem_heap;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_heap_add
+ *
+ * @Description	This function is for creating and adding the heap to the
+ *		device memory template
+ *
+ * @Input	devmem_tmplt_hndl: device memory template handle
+ *
+ * @Input	heap_info_arg: pointer to the heap info structure
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_heap_add(void *devmem_tmplt_hndl,
+			   struct talmmu_heap_info *heap_info_arg)
+{
+	struct talmmu_dm_tmpl *devmem_template = devmem_tmplt_hndl;
+	struct talmmu_devmem_heap *devmem_heap;
+	unsigned int res;
+
+	if (!devmem_tmplt_hndl || !heap_info_arg)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Make sure there is room for another heap in the template */
+	if (devmem_template->num_heaps >= TALMMU_MAX_DEVICE_HEAPS)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	res = talmmu_create_heap(devmem_tmplt_hndl,
+				 heap_info_arg,
+				 1,
+				 &devmem_heap);
+	if (res != IMG_SUCCESS)
+		return res;
+
+	devmem_template->devmem_heap[devmem_template->num_heaps] = devmem_heap;
+	devmem_template->num_heaps++;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_ctx_create
+ *
+ * @Description	This function is used to create a device memory context
+ *
+ * @Input	devmem_tmplt_hndl: pointer to the device memory template handle
+ *
+ * @Input	mmu_ctx_id: MMU context ID used with the TAL
+ *
+ * @Output	devmem_ctx_hndl: pointer to the device memory context handle
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_ctx_create(void *devmem_tmplt_hndl,
+			     unsigned int mmu_ctx_id,
+			     void **devmem_ctx_hndl)
+{
+	struct talmmu_dm_tmpl *dm_tmpl = devmem_tmplt_hndl;
+	struct talmmu_devmem_ctx *dm_ctx;
+	struct talmmu_devmem_heap *dm_heap;
+	int i;
+	unsigned int res = IMG_SUCCESS;
+
+	if (!devmem_tmplt_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Allocate memory for device memory context */
+	dm_ctx = kzalloc((sizeof(struct talmmu_devmem_ctx)), GFP_KERNEL);
+	if (!dm_ctx)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	/*
+	 * Update the device memory context structure members
+	 * Update the device memory template
+	 */
+	dm_ctx->devmem_template = dm_tmpl;
+	/* Update MMU context ID */
+	dm_ctx->mmu_ctx_id = mmu_ctx_id;
+
+	/* Check for PTD Alignment */
+	if (dm_tmpl->devmem_info.ptd_alignment == 0)
+		/*
+		 * Make sure alignment is a multiple of page size.
+		 * Set up PTD alignment to Page Size
+		 */
+		dm_tmpl->devmem_info.ptd_alignment =
+			dm_tmpl->devmem_info.page_size;
+
+	/* Reference or create heaps for this context */
+	for (i = 0; i < dm_tmpl->num_heaps; i++) {
+		dm_heap = dm_tmpl->devmem_heap[i];
+		if (!dm_heap) {
+			res = IMG_ERROR_UNEXPECTED_STATE;
+			goto error_heap_create;
+		}
+
+		switch (dm_heap->heap_info.heap_type) {
+		case TALMMU_HEAP_PERCONTEXT:
+			res = talmmu_create_heap(dm_tmpl,
+						 &dm_heap->heap_info,
+						 1,
+						 &dm_ctx->devmem_heap[i]);
+			if (res != IMG_SUCCESS)
+				goto error_heap_create;
+			break;
+
+		default:
+			break;
+		}
+
+		dm_ctx->num_heaps++;
+	}
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+	/* Add the device memory context to the list */
+	lst_add(&dm_tmpl->devmem_ctx_list, dm_ctx);
+
+	dm_tmpl->num_ctxs++;
+
+	mutex_unlock(global_lock);
+
+	*devmem_ctx_hndl = dm_ctx;
+
+	return IMG_SUCCESS;
+
+error_heap_create:
+	/* Destroy the device memory heaps which were already created */
+	for (i--; i >= 0; i--) {
+		dm_heap = dm_ctx->devmem_heap[i];
+		if (dm_heap->heap_info.heap_type == TALMMU_HEAP_PERCONTEXT)
+			talmmu_devmem_heap_destroy(dm_heap);
+
+		dm_ctx->num_heaps--;
+	}
+	kfree(dm_ctx);
+	return res;
+}
+
+/*
+ * @Function	talmmu_devmem_ctx_destroy
+ *
+ * @Description	This function is used to get the device memory context from
+ *		the list and destroy
+ *
+ * @Input	devmem_ctx_hndl: device memory context handle
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_ctx_destroy(void *devmem_ctx_hndl)
+{
+	struct talmmu_devmem_ctx *devmem_ctx = devmem_ctx_hndl;
+	struct talmmu_dm_tmpl *devmem_template;
+	struct talmmu_devmem_heap *devmem_heap;
+	unsigned int i;
+
+	if (!devmem_ctx_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	devmem_template = devmem_ctx->devmem_template;
+
+	for (i = 0; i < devmem_ctx->num_heaps; i++) {
+		devmem_heap = devmem_ctx->devmem_heap[i];
+		if (!devmem_heap)
+			return IMG_ERROR_INVALID_PARAMETERS;
+
+		talmmu_devmem_heap_destroy(devmem_heap);
+	}
+
+	devmem_ctx->pagedir = NULL;
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+	lst_remove(&devmem_template->devmem_ctx_list, devmem_ctx);
+
+	devmem_ctx->devmem_template->num_ctxs--;
+
+	mutex_unlock(global_lock);
+
+	kfree(devmem_ctx);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_get_heap_handle
+ *
+ * @Description	This function is used to get the device memory heap handle
+ *
+ * @Input	hid: heap id
+ *
+ * @Input	devmem_ctx_hndl: device memory context handle
+ *
+ * @Output	devmem_heap_hndl: pointer to the device memory heap handle
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_get_heap_handle(unsigned int hid,
+			   void *devmem_ctx_hndl,
+			   void **devmem_heap_hndl)
+{
+	struct talmmu_devmem_ctx *devmem_ctx = devmem_ctx_hndl;
+	unsigned int i;
+
+	if (!devmem_ctx_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	for (i = 0; i < devmem_ctx->num_heaps; i++) {
+		/*
+		 * Checking for requested heap id match and return the device
+		 * memory heap handle
+		 */
+		if (devmem_ctx->devmem_heap[i]->heap_info.heap_id == hid) {
+			*devmem_heap_hndl = devmem_ctx->devmem_heap[i];
+			return IMG_SUCCESS;
+		}
+	}
+
+	return IMG_ERROR_GENERIC_FAILURE;
+}
+
+/*
+ * @Function	talmmu_devmem_heap_options
+ *
+ * @Description	This function is used to set additional heap options
+ *
+ * @Input	devmem_heap_hndl: Handle for heap
+ *
+ * @Input	heap_opt_id: Heap options ID
+ *
+ * @Input	heap_options: Heap options
+ *
+ * @Return	None
+ *
+ */
+void talmmu_devmem_heap_options(void *devmem_heap_hndl,
+				enum talmmu_heap_option_id heap_opt_id,
+				union talmmu_heap_options heap_options)
+{
+	struct talmmu_devmem_heap *dm_heap = devmem_heap_hndl;
+
+	switch (heap_opt_id) {
+	case TALMMU_HEAP_OPT_ADD_GUARD_BAND:
+		dm_heap->guardband = heap_options.guardband_opt.guardband;
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * @Function	talmmu_devmem_alloc_nonmap
+ *
+ * @Description	This function allocates device virtual address space from
+ *		a heap without mapping any host memory behind it
+ *
+ * @Input	devmem_ctx_hndl: device memory context handle
+ * @Input	devmem_heap_hndl: device memory heap handle
+ * @Input	size: size of the allocation in bytes
+ * @Input	align: required alignment in bytes
+ *
+ * @Output	mem_hndl: returns the handle for the memory object
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+static int talmmu_devmem_alloc_nonmap(void *devmem_ctx_hndl,
+				      void *devmem_heap_hndl,
+				      unsigned int size,
+				      unsigned int align,
+				      unsigned int dev_virt_offset,
+				      unsigned char ext_dev_vaddr,
+				      void **mem_hndl)
+{
+	struct talmmu_devmem_ctx *dm_ctx = devmem_ctx_hndl;
+	struct talmmu_dm_tmpl *dm_tmpl;
+	struct talmmu_devmem_heap *dm_heap = devmem_heap_hndl;
+	struct talmmu_memory *mem;
+	unsigned long long ui64_dev_offset = 0;
+	int res = IMG_SUCCESS;
+
+	if (!dm_ctx)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!devmem_heap_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	dm_tmpl = dm_ctx->devmem_template;
+
+	/* Allocate memory for memory structure */
+	mem = kzalloc((sizeof(struct talmmu_memory)), GFP_KERNEL);
+	if (!mem)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	mem->devmem_heap = dm_heap;
+	mem->devmem_ctx = dm_ctx;
+	mem->ext_dev_virtaddr = ext_dev_vaddr;
+
+	/* We always require the allocation to be at least page aligned */
+	if (align >= dm_tmpl->devmem_info.page_size)
+		/*
+		 * alignment is larger than page size - make sure alignment is
+		 * a multiple of page size
+		 */
+		mem->alignment = align;
+	else
+		/*
+		 * alignment is smaller than page size - make sure page size is
+		 * a multiple of alignment. Now round up alignment to one page
+		 */
+		mem->alignment = dm_tmpl->devmem_info.page_size;
+
+	/*
+	 * Round size up to the next multiple of physical pages,
+	 * e.g. a 5000-byte request becomes 8192 with 4KB pages
+	 */
+	if ((size % dm_tmpl->devmem_info.page_size) != 0)
+		mem->size = ((size / dm_tmpl->devmem_info.page_size)
+			+ 1) * dm_tmpl->devmem_info.page_size;
+	else
+		mem->size = size;
+
+	/* If the device virtual address was externally defined */
+	if (mem->ext_dev_virtaddr) {
+		res = IMG_ERROR_INVALID_PARAMETERS;
+		goto free_mem;
+	}
+
+	res = addr_cx_malloc_align_res(&dm_heap->ctx, "",
+				       (mem->size + dm_heap->guardband),
+				       mem->alignment,
+				       &ui64_dev_offset);
+
+	mem->dev_virtoffset = (unsigned int)ui64_dev_offset;
+	if (res != IMG_SUCCESS)
+		/*
+		 * If heap space is unavailable, return the error; the
+		 * caller must handle this condition
+		 */
+		goto free_virt;
+
+	mutex_lock_nested(global_lock, SUBCLASS_TALMMU);
+
+	/*
+	 * Add memory allocation to the list for this heap...
+	 * If the heap is empty...
+	 */
+	if (lst_empty(&dm_heap->memory_list))
+		/*
+		 * Save flag to indicate whether the device virtual address
+		 * is allocated internally or externally...
+		 */
+		dm_heap->ext_dev_virtaddr = mem->ext_dev_virtaddr;
+
+	/*
+	 * Once we have started allocating in one way ensure that we continue
+	 * to do this...
+	 */
+	lst_add(&dm_heap->memory_list, mem);
+
+	mutex_unlock(global_lock);
+
+	*mem_hndl = mem;
+
+	return IMG_SUCCESS;
+
+free_virt:
+	addr_cx_free(&dm_heap->ctx, "", mem->dev_virtoffset);
+free_mem:
+	kfree(mem);
+
+	return res;
+}
+
+/*
+ * @Function	talmmu_devmem_addr_alloc
+ *
+ * @Description	This function allocates a device virtual address range
+ *		(without mapping) from the given heap
+ *
+ * @Input	devmem_ctx_hndl: device memory context handle
+ * @Input	devmem_heap_hndl: device memory heap handle
+ * @Input	size: size of the allocation in bytes
+ * @Input	align: required alignment in bytes
+ *
+ * @Output	mem_hndl: returns the handle for the memory object
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_addr_alloc(void *devmem_ctx_hndl,
+			     void *devmem_heap_hndl,
+			     unsigned int size,
+			     unsigned int align,
+			     void **mem_hndl)
+{
+	unsigned int res;
+	void *mem;
+
+	res = talmmu_devmem_alloc_nonmap(devmem_ctx_hndl,
+					 devmem_heap_hndl,
+					 size,
+					 align,
+					 0,
+					 0,
+					 &mem);
+	if (res != IMG_SUCCESS)
+		return res;
+
+	*mem_hndl = mem;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	talmmu_devmem_addr_free
+ *
+ * @Description	This function is used to free device memory allocated using
+ *		talmmu_devmem_addr_alloc().
+ *
+ * @Input	mem_hndl : Handle for the memory object
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_devmem_addr_free(void *mem_hndl)
+{
+	unsigned int res;
+
+	if (!mem_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* free device memory allocated by calling talmmu_devmem_free() */
+	res = talmmu_devmem_free(mem_hndl);
+
+	return res;
+}
+
+/*
+ * @Function	talmmu_get_dev_virt_addr
+ *
+ * @Description	This function is used to obtain the device (virtual) memory
+ *		address, which may be required as a device virtual address
+ *		in some of the TAL image functions
+ *
+ * @Input	mem_hndl : Handle for the memory object
+ *
+ * @Output	dev_virt: A pointer used to return the device virtual address
+ *
+ * @Return	IMG_SUCCESS or an error code
+ *
+ */
+int talmmu_get_dev_virt_addr(void *mem_hndl,
+			     unsigned int *dev_virt)
+{
+	struct talmmu_memory *mem = mem_hndl;
+	struct talmmu_devmem_heap *devmem_heap;
+
+	if (!mem_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	devmem_heap = mem->devmem_heap;
+
+	/*
+	 * Device virtual address is addition of the specific device virtual
+	 * offset and the base device virtual address from the heap information
+	 */
+	*dev_virt = (devmem_heap->heap_info.basedev_virtaddr +
+		mem->dev_virtoffset);
+
+	return IMG_SUCCESS;
+}
diff --git a/drivers/staging/media/vxd/common/talmmu_api.h b/drivers/staging/media/vxd/common/talmmu_api.h
new file mode 100644
index 000000000000..f37f78394d54
--- /dev/null
+++ b/drivers/staging/media/vxd/common/talmmu_api.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TAL MMU Extensions.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#include "addr_alloc.h"
+#include "ra.h"
+#include "lst.h"
+
+#ifndef __TALMMU_API_H__
+#define __TALMMU_API_H__
+
+#define	TALMMU_MAX_DEVICE_HEAPS	(32)
+#define	TALMMU_MAX_TEMPLATES	(32)
+
+/* MMU type */
+enum talmmu_mmu_type {
+	/* 4kb pages and 32-bit address range */
+	TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR = 0x1,
+	/* variable size pages and 32-bit address */
+	TALMMU_MMUTYPE_VAR_PAGES_32BIT_ADDR,
+	/* 4kb pages and 36-bit address range */
+	TALMMU_MMUTYPE_4K_PAGES_36BIT_ADDR,
+	/* 4kb pages and 40-bit address range */
+	TALMMU_MMUTYPE_4K_PAGES_40BIT_ADDR,
+	/* variable size pages and 40-bit address range */
+	TALMMU_MMUTYPE_VP_40BIT,
+	TALMMU_MMUTYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Device flags */
+enum talmmu_dev_flags {
+	TALMMU_DEVFLAGS_NONE = 0x0,
+	TALMMU_DEVFLAGS_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Heap type */
+enum talmmu_heap_type {
+	TALMMU_HEAP_SHARED_EXPORTED,
+	TALMMU_HEAP_PERCONTEXT,
+	TALMMU_HEAP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Heap flags */
+enum talmmu_eheapflags {
+	TALMMU_HEAPFLAGS_NONE = 0x0,
+	TALMMU_HEAPFLAGS_SET_CACHE_CONSISTENCY = 0x00000001,
+	TALMMU_HEAPFLAGS_128BYTE_INTERLEAVE = 0x00000002,
+	TALMMU_HEAPFLAGS_256BYTE_INTERLEAVE = 0x00000004,
+	TALMMU_HEAPFLAGS_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* Contains the device memory information */
+struct talmmu_devmem_info {
+	/* device id */
+	unsigned int device_id;
+	/* mmu type */
+	enum talmmu_mmu_type mmu_type;
+	/* Device flags - bit flags that can be combined */
+	enum talmmu_dev_flags dev_flags;
+	/* Name of the memory space for page directory allocations */
+	unsigned char *pagedir_memspace_name;
+	/* Name of the memory space for page table allocations */
+	unsigned char *pagetable_memspace_name;
+	/* Page size in bytes */
+	unsigned int page_size;
+	/* PTD alignment, must be multiple of Page size */
+	unsigned int ptd_alignment;
+};
+
+struct talmmu_heap_info {
+	/* heap id */
+	unsigned int heap_id;
+	/* heap type */
+	enum talmmu_heap_type heap_type;
+	/* heap flags - bit flags that can be combined */
+	enum talmmu_eheapflags heap_flags;
+	/* Name of the memory space for memory allocations */
+	unsigned char *memspace_name;
+	/* Base device virtual address */
+	unsigned int basedev_virtaddr;
+	/* size in bytes */
+	unsigned int size;
+};
+
+/* Device memory template information */
+struct talmmu_dm_tmpl {
+	/* list */
+	struct lst_t list;
+	/* Copy of device memory info structure */
+	struct talmmu_devmem_info devmem_info;
+	/* Memory space ID for PTD allocations */
+	void *ptd_memspace_hndl;
+	/* Memory space ID for Page Table allocations */
+	void *ptentry_memspace_hndl;
+	/* number of heaps */
+	unsigned int num_heaps;
+	/* Array of heap pointers */
+	struct talmmu_devmem_heap *devmem_heap[TALMMU_MAX_DEVICE_HEAPS];
+	/* Number of active contexts */
+	unsigned int num_ctxs;
+	/* List of device memory context created from this template */
+	struct lst_t devmem_ctx_list;
+	/* Number of bits to shift right to obtain page number */
+	unsigned int page_num_shift;
+	/* Mask to extract byte-within-page */
+	unsigned int byte_in_pagemask;
+	/* Heap alignment */
+	unsigned int heap_alignment;
+	/* Page table entries/page */
+	unsigned int pagetable_entries_perpage;
+	/* Number of bits to shift right to obtain page table number */
+	unsigned int pagetable_num_shift;
+	/* Mask to extract index-within-page-table */
+	unsigned int index_in_pagetable_mask;
+	/* Number of bits to shift right to obtain page dir number */
+	unsigned int pagedir_num_shift;
+};
+
+/* Device memory heap information */
+struct talmmu_devmem_heap {
+	/* list item */
+	struct lst_t list;
+	/* Copy of the heap info structure */
+	struct talmmu_heap_info heap_info;
+	/* Pointer to the device memory template */
+	struct talmmu_dm_tmpl *devmem_template;
+	/* true if device virtual address offset allocated externally by user */
+	unsigned int ext_dev_virtaddr;
+	/* list of memory allocations */
+	struct lst_t memory_list;
+	/* Memory space ID for memory allocations */
+	void *memspace_hndl;
+	/* Address context structure */
+	struct addr_context ctx;
+	/* Regions structure */
+	struct addr_region regions;
+	/* size of heap guard band */
+	unsigned int guardband;
+};
+
+struct talmmu_devmem_ctx {
+	/* list item */
+	struct lst_t list;
+	/* Pointer to device template */
+	struct talmmu_dm_tmpl *devmem_template;
+	/* No. of heaps */
+	unsigned int num_heaps;
+	/* Array of heap pointers */
+	struct talmmu_devmem_heap *devmem_heap[TALMMU_MAX_DEVICE_HEAPS];
+	/* The MMU context id */
+	unsigned int mmu_ctx_id;
+	/* Pointer to the memory that represents Page directory */
+	unsigned int *pagedir;
+};
+
+struct talmmu_memory {
+	/* list item */
+	struct lst_t list;
+	/* Heap from which memory was allocated */
+	struct talmmu_devmem_heap *devmem_heap;
+	/* Context through which memory was allocated */
+	struct talmmu_devmem_ctx *devmem_ctx;
+	/* size */
+	unsigned int size;
+	/* alignment */
+	unsigned int alignment;
+	/* device virtual offset of allocation */
+	unsigned int dev_virtoffset;
+	/* true if device virtual address offset allocated externally by user */
+	unsigned int ext_dev_virtaddr;
+};
+
+/* This type defines the event types for the TALMMU callbacks */
+enum talmmu_event {
+	/* Function to flush the cache. */
+	TALMMU_EVENT_FLUSH_CACHE,
+	/* Function to write the page directory address to the device */
+	TALMMU_EVENT_WRITE_PAGE_DIRECTORY_REF,
+	/* Placeholder */
+	TALMMU_NO_OF_EVENTS
+};
+
+enum talmmu_heap_option_id {
+	/* Add guard band to all mallocs */
+	TALMMU_HEAP_OPT_ADD_GUARD_BAND,
+	TALMMU_HEAP_OPT_SET_MEM_ATTRIB,
+	TALMMU_HEAP_OPT_SET_MEM_POOL,
+
+	/* Placeholder */
+	TALMMU_NO_OF_OPTIONS,
+	TALMMU_NO_OF_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct talmmu_guardband_options {
+	unsigned int guardband;
+};
+
+union talmmu_heap_options {
+	/* Guardband parameters */
+	struct talmmu_guardband_options guardband_opt;
+};
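+
+/*
+ * Example (illustrative only): adding a 4KB guard band to a heap
+ * handle previously obtained from talmmu_get_heap_handle():
+ *
+ *	union talmmu_heap_options opt;
+ *
+ *	opt.guardband_opt.guardband = 0x1000;
+ *	talmmu_devmem_heap_options(heap, TALMMU_HEAP_OPT_ADD_GUARD_BAND, opt);
+ */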
+
+int talmmu_init(void);
+int talmmu_deinit(void);
+int talmmu_devmem_template_create(struct talmmu_devmem_info *devmem_info,
+				  void **devmem_template_hndl);
+int talmmu_devmem_heap_add(void *devmem_tmplt_hndl,
+			   struct talmmu_heap_info *heap_info_arg);
+int talmmu_devmem_template_destroy(void *devmem_tmplt_hndl);
+int talmmu_devmem_ctx_create(void *devmem_tmplt_hndl,
+			     unsigned int mmu_ctx_id,
+			     void **devmem_ctx_hndl);
+int talmmu_devmem_ctx_destroy(void *devmem_ctx_hndl);
+int talmmu_get_heap_handle(unsigned int hid,
+			   void *devmem_ctx_hndl,
+			   void **devmem_heap_hndl);
+/**
+ * talmmu_devmem_heap_empty - empty the device memory heap list
+ * @devmem_heap_hndl: device memory heap handle
+ *
+ * This function is used for emptying the device memory heap list.
+ */
+int talmmu_devmem_heap_empty(void *devmem_heap_hndl);
+void talmmu_devmem_heap_options(void *devmem_heap_hndl,
+				enum talmmu_heap_option_id heap_opt_id,
+				union talmmu_heap_options heap_options);
+int talmmu_devmem_addr_alloc(void *devmem_ctx_hndl,
+			     void *devmem_heap_hndl,
+			     unsigned int size,
+			     unsigned int align,
+			     void **mem_hndl);
+int talmmu_devmem_addr_free(void *mem_hndl);
+int talmmu_get_dev_virt_addr(void *mem_hndl,
+			     unsigned int *dev_virt);
+
+#endif /* __TALMMU_API_H__ */
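
For reviewers, a minimal sketch of the expected calling sequence for this
API, using only the declarations above (the context id, sizes and alignment
are illustrative and error handling is elided):

	struct talmmu_devmem_info devmem_info = { 0 }; /* filled per device */
	struct talmmu_heap_info heap_info = { 0 };     /* heap id/type/flags, base, size */
	void *tmpl, *ctx, *heap_hndl, *mem;
	unsigned int dev_virt;

	talmmu_init();
	talmmu_devmem_template_create(&devmem_info, &tmpl);
	talmmu_devmem_heap_add(tmpl, &heap_info);
	talmmu_devmem_ctx_create(tmpl, 1, &ctx);
	talmmu_get_heap_handle(heap_info.heap_id, ctx, &heap_hndl);
	talmmu_devmem_addr_alloc(ctx, heap_hndl, 0x1000, 0x1000, &mem);
	talmmu_get_dev_virt_addr(mem, &dev_virt);
	/* ... program dev_virt into the device ... */
	talmmu_devmem_addr_free(mem);
	talmmu_devmem_ctx_destroy(ctx);
	talmmu_devmem_template_destroy(tmpl);
	talmmu_deinit();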
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 12/30] v4l: vxd-dec: Add VDEC MMU wrapper
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (10 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 11/30] v4l: vxd-dec: Add TALMMU module sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 13/30] v4l: vxd-dec: Add Bitstream Preparser (BSPP) module sidraya.bj
                   ` (19 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This patch contains the VDEC MMU APIs, used for buffer mapping and for
generating device virtual addresses; it is built on top of the TALMMU APIs.
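
For reviewers, a minimal usage sketch of the wrapper API added by this
patch (vxd_dec_ctx, km_str_id, mem_heap_id and size are assumed to come
from the surrounding driver code; the heap id, attributes and alignment
are illustrative, and error handling is elided):

	void *mmu_dev, *mmu_str;
	struct vidio_ddbufinfo buf_info;

	mmu_device_create(MMU_TYPE_40BIT, 0x1000, &mmu_dev);
	mmu_stream_create(mmu_dev, km_str_id, vxd_dec_ctx, &mmu_str);

	/* Allocate a buffer and map it into the stream MMU context. */
	mmu_stream_alloc(mmu_str, MMU_HEAP_BITSTREAM_BUFFERS, mem_heap_id,
			 SYS_MEMATTRIB_UNCACHED, size, DEV_MMU_PAGE_ALIGNMENT,
			 &buf_info);
	/* buf_info.dev_virt now holds the device virtual address. */

	mmu_free_mem(mmu_str, &buf_info);
	mmu_stream_destroy(mmu_str);
	mmu_device_destroy(mmu_dev);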

Signed-off-by: Lakshmi Sankar <lakshmisankar-t@ti.com>
Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |   2 +
 .../media/vxd/decoder/vdec_mmu_wrapper.c      | 829 ++++++++++++++++++
 .../media/vxd/decoder/vdec_mmu_wrapper.h      | 174 ++++
 3 files changed, 1005 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
 create mode 100644 drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 2b0d0708d852..6c3f7a55ce9b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19565,6 +19565,8 @@ F:	drivers/staging/media/vxd/decoder/hw_control.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
 F:	drivers/staging/media/vxd/decoder/translation_api.c
 F:	drivers/staging/media/vxd/decoder/translation_api.h
+F:	drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
+F:	drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h
 F:	drivers/staging/media/vxd/decoder/vxd_core.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.c
 F:	drivers/staging/media/vxd/decoder/vxd_dec.h
diff --git a/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c b/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
new file mode 100644
index 000000000000..384ce840b4dc
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VDEC MMU Functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include "img_dec_common.h"
+#include "lst.h"
+#include "talmmu_api.h"
+#include "vdec_defs.h"
+#include "vdec_mmu_wrapper.h"
+#include "vxd_dec.h"
+
+#define GUARD_BAND  0x1000
+
+struct mmuheap {
+	unsigned char *name;
+	enum mmu_eheap_id heap_id;
+	enum talmmu_heap_type heap_type;
+	unsigned int start_offset;
+	unsigned int size;
+	unsigned char *mem_space;
+	unsigned char use_guard_band;
+	unsigned char image_buffers;
+};
+
+static const struct mmuheap mmu_heaps[MMU_HEAP_MAX] = {
+	{ "Image untiled", MMU_HEAP_IMAGE_BUFFERS_UNTILED,
+	  TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_UNTILED_START,
+	  PVDEC_HEAP_UNTILED_SIZE, "MEMBE", 1, 1 },
+
+	{ "Bitstream", MMU_HEAP_BITSTREAM_BUFFERS,
+	  TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_BITSTREAM_START,
+	  PVDEC_HEAP_BITSTREAM_SIZE, "MEMDMAC_02", 1, 0 },
+
+	{ "Stream", MMU_HEAP_STREAM_BUFFERS,
+	  TALMMU_HEAP_PERCONTEXT, PVDEC_HEAP_STREAM_START,
+	  PVDEC_HEAP_STREAM_SIZE, "MEM", 1, 0 },
+};
+
+/*
+ * @Heap ID
+ * @Heap type
+ * @Heap flags
+ * @Memory space name
+ * @Start address (virtual)
+ * @Size of heap, in bytes
+ */
+static struct talmmu_heap_info heap_info = {
+	MMU_HEAP_IMAGE_BUFFERS_UNTILED,
+	TALMMU_HEAP_PERCONTEXT,
+	TALMMU_HEAPFLAGS_NONE,
+	"MEMBE",
+	0,
+	0,
+};
+
+/*
+ * This structure contains the device context.
+ * @brief  VDECDD MMU Device Context
+ * @devmem_template_hndl: Handle for MMU template.
+ * @devmem_ctx_hndl: Handle for MMU context.
+ * @str_list: List of streams.
+ */
+struct mmu_dev_context {
+	void *devmem_template_hndl;
+	void *devmem_ctx_hndl;
+	struct lst_t str_list;
+	unsigned int ctx_id;
+	unsigned int next_ctx_id;
+};
+
+/*
+ * This structure contains the stream context.
+ * @brief  VDECDD MMU Stream Context
+ * @link: List link (allows the structure to be part of a MeOS list).
+ * @devmem_ctx_hndl: Handle for MMU context.
+ * @dev_ctx: Pointer to device context.
+ * @ctx_id: MMU context Id.
+ * @km_str_id: Stream ID used in communication with the new KM interface.
+ */
+struct mmu_str_context {
+	void **link;
+	void *devmem_ctx_hndl;
+	struct mmu_dev_context *dev_ctx;
+	unsigned int ctx_id;
+	void *ptd_memspace_hndl;
+	unsigned int int_reg_num;
+	unsigned int km_str_id;
+	struct vxd_dec_ctx *vxd_dec_context;
+};
+
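+/*
+ * @Function	set_attributes
+ * @Description
+ * Translates sys_emem_attrib memory attribute flags into the MEM_ATTR_*
+ * flags used by the img_mem allocator.
+ */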
+static unsigned int set_attributes(enum sys_emem_attrib mem_attrib)
+{
+	unsigned int attrib = 0;
+
+	if (mem_attrib & SYS_MEMATTRIB_CACHED)
+		attrib |= MEM_ATTR_CACHED;
+
+	if (mem_attrib & SYS_MEMATTRIB_UNCACHED)
+		attrib |= MEM_ATTR_UNCACHED;
+
+	if (mem_attrib & SYS_MEMATTRIB_WRITECOMBINE)
+		attrib |= MEM_ATTR_WRITECOMBINE;
+
+	if (mem_attrib & SYS_MEMATTRIB_SECURE)
+		attrib |= MEM_ATTR_SECURE;
+
+	return attrib;
+}
+
+/*
+ * @Function	mmu_devmem_context_create
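+ * @Description
+ * Creates a TALMMU device memory context from the device template and
+ * applies the guard-band option to each heap that requests it.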
+ */
+static int mmu_devmem_context_create(struct mmu_dev_context *dev_ctx, void **mmu_ctx_hndl)
+{
+	int result;
+	void *devmem_heap_hndl;
+	union talmmu_heap_options heap_opt1;
+	unsigned int i;
+	unsigned char use_guardband;
+	enum talmmu_heap_option_id heap_option_id;
+
+	dev_ctx->next_ctx_id++;
+
+	/* Create a context from the template */
+	result = talmmu_devmem_ctx_create(dev_ctx->devmem_template_hndl, dev_ctx->next_ctx_id,
+					  mmu_ctx_hndl);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Apply options to heaps. */
+	heap_opt1.guardband_opt.guardband = GUARD_BAND;
+
+	for (i = 0; i < MMU_HEAP_MAX; i++) {
+		result = talmmu_get_heap_handle(mmu_heaps[i].heap_id, *mmu_ctx_hndl,
+						&devmem_heap_hndl);
+		if (result != IMG_SUCCESS)
+			return result;
+
+		use_guardband = mmu_heaps[i].use_guard_band;
+		heap_option_id = TALMMU_HEAP_OPT_ADD_GUARD_BAND;
+		if (use_guardband)
+			talmmu_devmem_heap_options(devmem_heap_hndl, heap_option_id, heap_opt1);
+	}
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_device_create
+ */
+int mmu_device_create(enum mmu_etype mmu_type_arg,
+		      unsigned int ptd_alignment,
+		      void **mmudev_handle)
+{
+	int result = IMG_SUCCESS;
+	enum talmmu_mmu_type talmmu_type =
+		TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR;
+	unsigned int i;
+	struct mmu_dev_context *dev_ctx;
+	struct talmmu_devmem_info dev_mem_info;
+
+	/* Set the TAL MMU type. */
+	switch (mmu_type_arg) {
+	case MMU_TYPE_32BIT:
+		talmmu_type = TALMMU_MMUTYPE_4K_PAGES_32BIT_ADDR;
+		break;
+
+	case MMU_TYPE_36BIT:
+		talmmu_type = TALMMU_MMUTYPE_4K_PAGES_36BIT_ADDR;
+		break;
+
+	case MMU_TYPE_40BIT:
+		talmmu_type = TALMMU_MMUTYPE_4K_PAGES_40BIT_ADDR;
+		break;
+
+	default:
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* Allocate a device context structure */
+	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+	if (!dev_ctx)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	/* Initialise stream list. */
+	lst_init(&dev_ctx->str_list);
+
+	/* Initialise TALMMU. */
+	result = talmmu_init();
+	if (result != IMG_SUCCESS)
+		goto error_tal_init;
+
+	dev_mem_info.device_id = 0;
+	dev_mem_info.mmu_type = talmmu_type;
+	dev_mem_info.dev_flags = TALMMU_DEVFLAGS_NONE;
+	dev_mem_info.pagedir_memspace_name = "MEM";
+	dev_mem_info.pagetable_memspace_name = NULL;
+	dev_mem_info.page_size = DEV_MMU_PAGE_SIZE;
+	dev_mem_info.ptd_alignment = ptd_alignment;
+
+	result = talmmu_devmem_template_create(&dev_mem_info, &dev_ctx->devmem_template_hndl);
+	if (result != IMG_SUCCESS)
+		goto error_tal_template;
+
+	/* Add heaps to template */
+	for (i = 0; i < MMU_HEAP_MAX; i++) {
+		heap_info.heap_id = mmu_heaps[i].heap_id;
+		heap_info.heap_type = mmu_heaps[i].heap_type;
+		heap_info.memspace_name = mmu_heaps[i].name;
+		heap_info.size = mmu_heaps[i].size;
+		heap_info.basedev_virtaddr = mmu_heaps[i].start_offset;
+
+		result = talmmu_devmem_heap_add(dev_ctx->devmem_template_hndl, &heap_info);
+		if (result != IMG_SUCCESS)
+			goto error_tal_heap;
+	}
+
+	/* Create the device context. */
+	result = mmu_devmem_context_create(dev_ctx, &dev_ctx->devmem_ctx_hndl);
+	if (result != IMG_SUCCESS)
+		goto error_mmu_context;
+
+	dev_ctx->ctx_id = dev_ctx->next_ctx_id;
+
+	/* Return the device context. */
+	*mmudev_handle = dev_ctx;
+
+	return IMG_SUCCESS;
+
+	/* Roll back in case of errors. */
+error_mmu_context:
+error_tal_heap:
+	talmmu_devmem_template_destroy(dev_ctx->devmem_template_hndl);
+error_tal_template:
+	talmmu_deinit();
+error_tal_init:
+	kfree(dev_ctx);
+	return result;
+}
+
+/*
+ * @Function	mmu_device_destroy
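+ * @Description
+ * Destroys any remaining streams, then the device TALMMU context and
+ * template, and finally de-initialises TALMMU.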
+ */
+int mmu_device_destroy(void *mmudev_handle)
+{
+	struct mmu_dev_context *dev_ctx = mmudev_handle;
+	unsigned int result;
+	struct mmu_str_context *str_ctx;
+
+	/* Validate inputs. */
+	if (!mmudev_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Destroy all streams associated with the device. */
+	str_ctx = lst_first(&dev_ctx->str_list);
+	while (str_ctx) {
+		result = mmu_stream_destroy(str_ctx);
+		if (result != IMG_SUCCESS)
+			return result;
+		/* See if there are more streams. */
+		str_ctx = lst_first(&dev_ctx->str_list);
+	}
+
+	/* Destroy the device context */
+	result = talmmu_devmem_ctx_destroy(dev_ctx->devmem_ctx_hndl);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Destroy the template. */
+	result = talmmu_devmem_template_destroy(dev_ctx->devmem_template_hndl);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	talmmu_deinit();
+
+	kfree(dev_ctx);
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_stream_create
+ * @Description
+ * This function is used to create and initialise the MMU stream context.
+ * @Input	mmudev_handle : The MMU device handle.
+ * @Input	km_str_id : Stream Id used in communication with KM driver.
+ * @Output	mmu_str_hndl : A pointer used to return the MMU stream
+ *		handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_stream_create(void *mmudev_handle,
+		      unsigned int km_str_id,
+		      void *vxd_dec_ctx_arg,
+		      void **mmu_str_hndl)
+{
+	struct mmu_dev_context *dev_ctx = mmudev_handle;
+	struct mmu_str_context *str_ctx;
+	int res;
+
+	/* Validate inputs. */
+	if (!mmudev_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Allocate a stream context structure */
+	str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+	if (!str_ctx)
+		return IMG_ERROR_OUT_OF_MEMORY;
+
+	str_ctx->km_str_id = km_str_id;
+	str_ctx->dev_ctx = dev_ctx;
+	str_ctx->int_reg_num = 32;
+	str_ctx->vxd_dec_context = (struct vxd_dec_ctx *)vxd_dec_ctx_arg;
+
+	/* Create a stream context. */
+	res = mmu_devmem_context_create(dev_ctx, &str_ctx->devmem_ctx_hndl);
+	if (res != IMG_SUCCESS) {
+		kfree(str_ctx);
+		return res;
+	}
+
+	str_ctx->ctx_id = dev_ctx->next_ctx_id;
+
+	/* Add stream to list. */
+	lst_add(&dev_ctx->str_list, str_ctx);
+
+	*mmu_str_hndl = str_ctx;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_stream_destroy
+ * @Description
+ * This function is used to destroy the MMU stream context.
+ * NOTE: Destroy automatically frees any memory allocated using
+ *	mmu_stream_alloc().
+ * @Input	mmu_str_hndl : The MMU stream handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_stream_destroy(void *mmu_str_hndl)
+{
+	struct mmu_str_context *str_ctx = mmu_str_hndl;
+	int res;
+
+	/* Validate inputs. */
+	if (!mmu_str_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Remove stream from list. */
+	lst_remove(&str_ctx->dev_ctx->str_list, str_ctx);
+
+	/* Destroy the device context */
+	res = talmmu_devmem_ctx_destroy(str_ctx->devmem_ctx_hndl);
+	if (res != IMG_SUCCESS)
+		return res;
+
+	kfree(str_ctx);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_alloc
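+ * @Description
+ * Allocates memory via img_mem, maps it to the CPU (unless secure),
+ * allocates device virtual address space from the given heap and maps
+ * the buffer into the stream MMU context.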
+ */
+static int mmu_alloc(void *devmem_ctx_hndl,
+		     struct vxd_dec_ctx *vxd_dec_ctx_arg,
+		     enum mmu_eheap_id heap_id,
+		     unsigned int mem_heap_id,
+		     enum sys_emem_attrib mem_attrib,
+		     unsigned int size,
+		     unsigned int alignment,
+		     struct vidio_ddbufinfo *ddbuf_info)
+{
+	int result;
+	void *devmem_heap_hndl;
+	struct vxd_free_data free_data;
+	struct vxd_dec_ctx *ctx;
+	struct vxd_dev *vxd;
+	struct vxd_alloc_data alloc_data;
+	unsigned int flags;
+
+	if (!devmem_ctx_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Set buffer size. */
+	ddbuf_info->buf_size = size;
+
+	/* Round size up to next multiple of physical pages */
+	if ((size % HOST_MMU_PAGE_SIZE) != 0)
+		size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+	/* Allocate memory */
+	ctx = vxd_dec_ctx_arg;
+	vxd = ctx->dev;
+
+	alloc_data.heap_id = mem_heap_id;
+	alloc_data.size = ddbuf_info->buf_size;
+
+	alloc_data.attributes = set_attributes(mem_attrib);
+
+	result = img_mem_alloc(vxd->dev, ctx->mem_ctx, alloc_data.heap_id, alloc_data.size,
+			       (enum mem_attr)alloc_data.attributes,
+			       (int *)&ddbuf_info->buff_id);
+	if (result != IMG_SUCCESS)
+		goto error_alloc;
+
+	ddbuf_info->is_internal = 1;
+
+	if (mem_attrib & SYS_MEMATTRIB_SECURE) {
+		ddbuf_info->cpu_virt = NULL;
+	} else {
+		/* Map the buffer to CPU */
+		result = img_mem_map_km(ctx->mem_ctx, ddbuf_info->buff_id);
+		if (result) {
+			dev_err(vxd->dev, "%s: failed to map buf to cpu!(%d)\n", __func__, result);
+			goto error_get_heap_handle;
+		}
+		ddbuf_info->cpu_virt = img_mem_get_kptr(ctx->mem_ctx, ddbuf_info->buff_id);
+	}
+
+	/* Get heap handle */
+	result = talmmu_get_heap_handle(heap_id, devmem_ctx_hndl, &devmem_heap_hndl);
+	if (result != IMG_SUCCESS)
+		goto error_get_heap_handle;
+
+	/* Allocate device "virtual" memory. */
+	result = talmmu_devmem_addr_alloc(devmem_ctx_hndl, devmem_heap_hndl, size, alignment,
+					  &ddbuf_info->hndl_memory);
+	if (result != IMG_SUCCESS)
+		goto error_mem_map_ext_mem;
+
+	/* Get the device virtual address. */
+	result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+	if (result != IMG_SUCCESS)
+		goto error_get_dev_virt_addr;
+
+	flags = VXD_MAP_FLAG_NONE;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+		flags |= VXD_MAP_FLAG_READ_ONLY;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+		flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+	result = vxd_map_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id,
+				ddbuf_info->dev_virt,
+				flags);
+
+	if (result != IMG_SUCCESS)
+		goto error_map_dev;
+
+	return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+	talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+	ddbuf_info->hndl_memory = NULL;
+error_mem_map_ext_mem:
+error_get_heap_handle:
+	free_data.buf_id = ddbuf_info->buff_id;
+	img_mem_free(ctx->mem_ctx, free_data.buf_id);
+error_alloc:
+	return result;
+}
+
+/*
+ * @Function	mmu_stream_alloc
+ */
+int mmu_stream_alloc(void *mmu_str_hndl,
+		     enum mmu_eheap_id heap_id,
+		     unsigned int mem_heap_id,
+		     enum sys_emem_attrib mem_attrib,
+		     unsigned int size,
+		     unsigned int alignment,
+		     struct vidio_ddbufinfo *ddbuf_info)
+{
+	struct mmu_str_context *str_ctx =
+		(struct mmu_str_context *)mmu_str_hndl;
+	int result;
+
+	/* Validate inputs. */
+	if (!mmu_str_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Check if device level heap. */
+	switch (heap_id) {
+	case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+	case MMU_HEAP_BITSTREAM_BUFFERS:
+	case MMU_HEAP_STREAM_BUFFERS:
+		break;
+
+	default:
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+	/* Allocate device memory. */
+	result = mmu_alloc(str_ctx->devmem_ctx_hndl, str_ctx->vxd_dec_context, heap_id, mem_heap_id,
+			   mem_attrib, size, alignment, ddbuf_info);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_stream_map_ext_sg
+ */
+int mmu_stream_map_ext_sg(void *mmu_str_hndl,
+			  enum mmu_eheap_id heap_id,
+			  void *sgt,
+			  unsigned int size,
+			  unsigned int alignment,
+			  enum sys_emem_attrib mem_attrib,
+			  void *cpu_linear_addr,
+			  struct vidio_ddbufinfo *ddbuf_info,
+			  unsigned int *buff_id)
+{
+	struct mmu_str_context *str_ctx =
+		(struct mmu_str_context *)mmu_str_hndl;
+	int result;
+	void *devmem_heap_hndl;
+	unsigned int flags;
+
+	struct vxd_dec_ctx *ctx = str_ctx->vxd_dec_context;
+	struct vxd_dev *vxd = ctx->dev;
+
+	/* Validate inputs. */
+	if (!mmu_str_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Check if device level heap. */
+	switch (heap_id) {
+	case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+	case MMU_HEAP_BITSTREAM_BUFFERS:
+	case MMU_HEAP_STREAM_BUFFERS:
+		break;
+
+	default:
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	if (!str_ctx->devmem_ctx_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Set buffer size. */
+	ddbuf_info->buf_size = size;
+
+	/* Round size up to next multiple of physical pages */
+	if ((size % HOST_MMU_PAGE_SIZE) != 0)
+		size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+	result = img_mem_import(vxd->dev, ctx->mem_ctx, ddbuf_info->buf_size,
+				(enum mem_attr)set_attributes(mem_attrib),
+				(int *)buff_id);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	if (mem_attrib & SYS_MEMATTRIB_SECURE)
+		ddbuf_info->cpu_virt = NULL;
+
+	ddbuf_info->buff_id = *buff_id;
+	ddbuf_info->is_internal = 0;
+
+	ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+	/* Set buffer size. */
+	ddbuf_info->buf_size = size;
+
+	/* Ensure the address of the buffer is at least page aligned. */
+	ddbuf_info->cpu_virt = cpu_linear_addr;
+
+	/* Get heap handle */
+	result = talmmu_get_heap_handle(heap_id, str_ctx->devmem_ctx_hndl, &devmem_heap_hndl);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Allocate device "virtual" memory. */
+	result = talmmu_devmem_addr_alloc(str_ctx->devmem_ctx_hndl, devmem_heap_hndl, size,
+					  alignment,
+					  &ddbuf_info->hndl_memory);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Get the device virtual address. */
+	result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+	if (result != IMG_SUCCESS)
+		goto error_get_dev_virt_addr;
+
+	/* Map memory to the device */
+	flags = VXD_MAP_FLAG_NONE;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+		flags |= VXD_MAP_FLAG_READ_ONLY;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+		flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+	result = vxd_map_buffer_sg(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id, sgt,
+				   ddbuf_info->dev_virt,
+				   flags);
+
+	if (result != IMG_SUCCESS)
+		goto error_map_dev;
+
+	return IMG_SUCCESS;
+
+error_map_dev:
+error_get_dev_virt_addr:
+	talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+	ddbuf_info->hndl_memory = NULL;
+	return result;
+}
+
+/*
+ * @Function	mmu_stream_map_ext
+ */
+int mmu_stream_map_ext(void *mmu_str_hndl,
+		       enum mmu_eheap_id heap_id,
+		       unsigned int buff_id,
+		       unsigned int size,
+		       unsigned int alignment,
+		       enum sys_emem_attrib mem_attrib,
+		       void *cpu_linear_addr,
+		       struct vidio_ddbufinfo *ddbuf_info)
+{
+	struct mmu_str_context *str_ctx =
+		(struct mmu_str_context *)mmu_str_hndl;
+	int result;
+	void *devmem_heap_hndl;
+	struct vxd_dec_ctx *ctx;
+	struct vxd_dev *vxd;
+	unsigned int flags;
+
+	/* Validate inputs. */
+	if (!mmu_str_hndl)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Check if device level heap. */
+	switch (heap_id) {
+	case MMU_HEAP_IMAGE_BUFFERS_UNTILED:
+	case MMU_HEAP_BITSTREAM_BUFFERS:
+	case MMU_HEAP_STREAM_BUFFERS:
+		break;
+
+	default:
+		return IMG_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* Round size up to next multiple of physical pages */
+	if ((size % HOST_MMU_PAGE_SIZE) != 0)
+		size = ((size / HOST_MMU_PAGE_SIZE) + 1) * HOST_MMU_PAGE_SIZE;
+
+	ddbuf_info->buff_id = buff_id;
+	ddbuf_info->is_internal = 0;
+
+	ddbuf_info->kmstr_id = str_ctx->km_str_id;
+
+	/* Set buffer size. */
+	ddbuf_info->buf_size = size;
+
+	/* Ensure the address of the buffer is at least page aligned. */
+	ddbuf_info->cpu_virt = cpu_linear_addr;
+
+	/* Get heap handle */
+	result = talmmu_get_heap_handle(heap_id, str_ctx->devmem_ctx_hndl, &devmem_heap_hndl);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Allocate device "virtual" memory. */
+	result = talmmu_devmem_addr_alloc(str_ctx->devmem_ctx_hndl, devmem_heap_hndl, size,
+					  alignment,
+					  &ddbuf_info->hndl_memory);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Get the device virtual address. */
+	result = talmmu_get_dev_virt_addr(ddbuf_info->hndl_memory, &ddbuf_info->dev_virt);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/*
+	 * Map device memory (allocated from outside VDEC)
+	 * into the stream PTD.
+	 */
+	ctx = str_ctx->vxd_dec_context;
+	vxd = ctx->dev;
+
+	flags = VXD_MAP_FLAG_NONE;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_READ_ONLY)
+		flags |= VXD_MAP_FLAG_READ_ONLY;
+
+	if (mem_attrib & SYS_MEMATTRIB_CORE_WRITE_ONLY)
+		flags |= VXD_MAP_FLAG_WRITE_ONLY;
+
+	result = vxd_map_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id,
+				ddbuf_info->dev_virt,
+				flags);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	mmu_free_mem
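+ * @Description
+ * Unmaps the buffer from the device, frees its device virtual address
+ * space and, for internally allocated buffers, the backing memory.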
+ */
+int mmu_free_mem(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+	int tmp_result;
+	int result = IMG_SUCCESS;
+	struct vxd_dec_ctx *ctx;
+	struct vxd_dev *vxd;
+
+	struct mmu_str_context *str_ctx =
+		(struct mmu_str_context *)mmustr_hndl;
+
+	/* Validate inputs. */
+	if (!ddbuf_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!str_ctx)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Unmap the memory mapped to the device */
+	ctx = str_ctx->vxd_dec_context;
+	vxd = ctx->dev;
+
+	tmp_result = vxd_unmap_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id);
+	if (tmp_result != IMG_SUCCESS)
+		result = tmp_result;
+
+	/*
+	 * Unmapping the memory mapped to the device - done
+	 * Free the memory.
+	 */
+	tmp_result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+	if (tmp_result != IMG_SUCCESS)
+		result = tmp_result;
+
+	if (ddbuf_info->is_internal) {
+		struct vxd_free_data free_data = { ddbuf_info->buff_id };
+
+		img_mem_free(ctx->mem_ctx, free_data.buf_id);
+	}
+
+	return result;
+}
+
+/*
+ * @Function	mmu_free_mem_sg
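+ * @Description
+ * Scatter-gather variant of mmu_free_mem(); additionally releases the
+ * buffer id tracked by the external memory manager.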
+ */
+int mmu_free_mem_sg(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info)
+{
+	int tmp_result;
+	int result = IMG_SUCCESS;
+	struct vxd_dec_ctx *ctx;
+	struct vxd_dev *vxd;
+	struct vxd_free_data free_data;
+
+	struct mmu_str_context *str_ctx =
+		(struct mmu_str_context *)mmustr_hndl;
+
+	/* Validate inputs. */
+	if (!ddbuf_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!str_ctx)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	free_data.buf_id = ddbuf_info->buff_id;
+	/* Unmap the memory mapped to the device */
+	ctx = str_ctx->vxd_dec_context;
+	vxd = ctx->dev;
+
+	tmp_result = vxd_unmap_buffer(vxd, ctx, ddbuf_info->kmstr_id, ddbuf_info->buff_id);
+	if (tmp_result != IMG_SUCCESS)
+		result = tmp_result;
+
+	/*
+	 * Unmapping the memory mapped to the device - done
+	 * Free the memory.
+	 */
+	tmp_result = talmmu_devmem_addr_free(ddbuf_info->hndl_memory);
+	if (tmp_result != IMG_SUCCESS)
+		result = tmp_result;
+
+	/*
+	 * for external mem manager buffers, just cleanup the idr list and
+	 * buffer objects
+	 */
+	img_mem_free_bufid(ctx->mem_ctx, free_data.buf_id);
+
+	return result;
+}
+
+/*
+ * @Function	mmu_get_heap
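+ * @Description
+ * Returns the id of the first heap that is marked for image buffers.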
+ */
+int mmu_get_heap(unsigned int image_stride, enum mmu_eheap_id *heap_id)
+{
+	unsigned int i;
+	unsigned char found = FALSE;
+
+	for (i = 0; i < MMU_HEAP_MAX; i++) {
+		if (mmu_heaps[i].image_buffers) {
+			*heap_id = mmu_heaps[i].heap_id;
+			found = TRUE;
+			break;
+		}
+	}
+
+	VDEC_ASSERT(found);
+	if (!found)
+		return IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+	return IMG_SUCCESS;
+}
diff --git a/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h b/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h
new file mode 100644
index 000000000000..50bed98240a6
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VDEC MMU Functions
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#ifndef _VXD_MMU_H_
+#define _VXD_MMU_H_
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "img_errors.h"
+#include "img_mem.h"
+#include "lst.h"
+#include "mmu_defs.h"
+#include "vid_buf.h"
+
+/* Page size of the device MMU */
+#define DEV_MMU_PAGE_SIZE (0x1000)
+/* Page alignment of the device MMU */
+#define DEV_MMU_PAGE_ALIGNMENT  (0x1000)
+
+#define HOST_MMU_PAGE_SIZE	PAGE_SIZE
+
+/*
+ * @Function	mmu_stream_get_ptd_handle
+ * @Description
+ * This function is used to obtain the stream PTD (Page Table Directory) handle.
+ * @Input	mmu_str_handle : MMU stream handle.
+ * @Output	str_ptd : Pointer to stream PTD handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_stream_get_ptd_handle(void *mmu_str_handle, void **str_ptd);
+
+/*
+ * @Function	mmu_device_create
+ * @Description
+ * This function is used to create and initialise the MMU device context.
+ * @Input	mmu_type : MMU type.
+ * @Input	ptd_alignment : Alignment of Page Table directory.
+ * @Output	mmudev_hndl : A pointer used to return the
+ *		MMU device handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_device_create(enum mmu_etype mmu_type,
+		      unsigned int ptd_alignment,
+		      void **mmudev_hndl);
+
+/*
+ * @Function	mmu_device_destroy
+ * @Description
+ * This function is used to destroy the MMU device context.
+ * NOTE: Destroying the device automatically destroys any streams and frees
+ * any memory allocated using mmu_stream_alloc().
+ * @Input	mmudev_hndl : The MMU device handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_device_destroy(void *mmudev_hndl);
+
+/*
+ * @Function	mmu_stream_create
+ * @Description
+ * This function is used to create and initialise the MMU stream context.
+ * @Input	mmudev_hndl : The MMU device handle.
+ * @Input	km_str_id : Stream Id used in communication with KM driver.
+ * @Output	mmustr_hndl : A pointer used to return the MMU stream handle.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_stream_create(void *mmudev_hndl, unsigned int km_str_id, void *vxd_dec_ctx,
+		      void **mmustr_hndl);
+
+/**
+ * mmu_stream_destroy - This function is used to destroy the MMU stream context.
+ * @mmustr_hndl : The MMU stream handle.
+ * Return	IMG_SUCCESS or an error code.
+ *
+ * NOTE: Destroy automatically frees any memory allocated using
+ *	mmu_stream_alloc().
+ */
+int mmu_stream_destroy(void *mmustr_hndl);
+
+/*
+ * @Function	mmu_stream_alloc
+ * @Description
+ * This function is used to allocate stream memory.
+ * @Input	mmustr_hndl : The MMU stream handle.
+ * @Input	heap_id : The MMU heap Id.
+ * @Input	mem_heap_id : Memory heap id
+ * @Input	mem_attrib : Memory attributes
+ * @Input	size : The size, in bytes, to be allocated.
+ * @Input	alignment : The required byte alignment
+ *		(1, 2, 4, 8, 16 etc).
+ * @Output	ddbuf_info : A pointer to a #vidio_ddbufinfo structure
+ *		used to return the buffer info.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int  mmu_stream_alloc(void *mmustr_hndl,
+		      enum mmu_eheap_id heap_id,
+		      unsigned int mem_heap_id,
+		      enum sys_emem_attrib mem_attrib,
+		      unsigned int size,
+		      unsigned int alignment,
+		      struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function	mmu_stream_map_ext
+ * @Description
+ * This function is used to allocate device memory (virtual memory) and map
+ * it onto memory that has already been allocated externally.
+ * NOTE: Memory can be freed using mmu_free_mem(). However, this does not
+ *	free the memory provided by the caller via cpu_linear_addr.
+ * @Input	mmustr_hndl    : The MMU stream handle.
+ * @Input	heap_id : The heap Id.
+ * @Input	buff_id : The buffer Id.
+ * @Input	size : The size, in bytes, to be allocated.
+ * @Input	alignment : The required byte alignment (1, 2, 4, 8, 16 etc).
+ * @Input	mem_attrib : Memory attributes
+ * @Input	cpu_linear_addr : CPU linear address of the memory
+ *		to be allocated for the device.
+ * @Output	ddbuf_info : A pointer to a #vidio_ddbufinfo structure
+ *		used to return the buffer info.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_stream_map_ext(void *mmustr_hndl,
+		       enum mmu_eheap_id heap_id,
+		       unsigned int buff_id,
+		       unsigned int size,
+		       unsigned int alignment,
+		       enum sys_emem_attrib mem_attrib,
+		       void *cpu_linear_addr,
+		       struct vidio_ddbufinfo *ddbuf_info);
+
+int mmu_stream_map_ext_sg(void *mmustr_hndl,
+			  enum mmu_eheap_id heap_id,
+			  void *sgt,
+			  unsigned int size,
+			  unsigned int alignment,
+			  enum sys_emem_attrib mem_attrib,
+			  void *cpu_linear_addr,
+			  struct vidio_ddbufinfo *ddbuf_info,
+			  unsigned int *buff_id);
+
+/*
+ * @Function	mmu_free_mem
+ * @Description
+ * This function is used to free device memory.
+ * @Input	mmustr_hndl : The MMU stream handle.
+ * @Input	ddbuf_info : A pointer to a #vidio_ddbufinfo structure.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_free_mem(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+/*
+ * @Function	mmu_free_mem_sg
+ * @Description
+ * This function is used to free device memory mapped via
+ * mmu_stream_map_ext_sg().
+ * @Input	mmustr_hndl : The MMU stream handle.
+ * @Input	ddbuf_info : A pointer to a #vidio_ddbufinfo structure.
+ * @Return	IMG_SUCCESS or an error code.
+ */
+int mmu_free_mem_sg(void *mmustr_hndl, struct vidio_ddbufinfo *ddbuf_info);
+
+int mmu_get_heap(unsigned int image_stride, enum mmu_eheap_id *heap_id);
+
+#endif /* _VXD_MMU_H_ */
-- 
2.17.1


-- 






This
message contains confidential information and is intended only 
for the
individual(s) named. If you are not the intended
recipient, you are 
notified that disclosing, copying, distributing or taking any
action in 
reliance on the contents of this mail and attached file/s is strictly
prohibited. Please notify the
sender immediately and delete this e-mail 
from your system. E-mail transmission
cannot be guaranteed to be secured or 
error-free as information could be
intercepted, corrupted, lost, destroyed, 
arrive late or incomplete, or contain
viruses. The sender therefore does 
not accept liability for any errors or
omissions in the contents of this 
message, which arise as a result of e-mail
transmission.

^ permalink raw reply related	[flat|nested] 48+ messages in thread

* [PATCH 13/30] v4l: vxd-dec: Add Bitstream Preparser (BSPP) module
  2021-08-18 14:10 [PATCH 00/30] TI Video Decoder driver upstreaming to v5.14-rc6 kernel sidraya.bj
                   ` (11 preceding siblings ...)
  2021-08-18 14:10 ` [PATCH 12/30] v4l: vxd-dec: Add VDEC MMU wrapper sidraya.bj
@ 2021-08-18 14:10 ` sidraya.bj
  2021-08-18 14:10 ` [PATCH 14/30] v4l: vxd-dec: Add common headers sidraya.bj
                   ` (18 subsequent siblings)
  31 siblings, 0 replies; 48+ messages in thread
From: sidraya.bj @ 2021-08-18 14:10 UTC (permalink / raw)
  To: gregkh, linux-staging, linux-kernel
  Cc: prashanth.ka, praneeth, mchehab, linux-media, praveen.ap, Sidraya

From: Sidraya <sidraya.bj@pathpartnertech.com>

This patch contains the implementation of the bitstream pre-parser; at
present it supports pre-parsing of H264, HEVC and MJPEG bitstreams. It
uses a software shift-register implementation for bitstream parsing.
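
For reviewers, a minimal sketch of how a caller feeds data to the
pre-parser through the entry points added below (str_context, buf_info,
bufmap_id, data_size, pict_tag_param, elem_type and picture_decoded are
assumed to come from the surrounding driver code; error handling is
elided):

	int ret;

	/* Queue one bitstream buffer for pre-parsing; units may span buffers. */
	ret = bspp_stream_submit_buffer(str_context, &buf_info, bufmap_id,
					data_size, pict_tag_param, elem_type);

	/* Later, when the decoder has finished with a picture: */
	ret = bspp_submit_picture_decoded(str_context, &picture_decoded);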

Signed-off-by: Lakshmi Sankar <lakshmisankar-t@ti.com>

Signed-off-by: Sidraya <sidraya.bj@pathpartnertech.com>
---
 MAINTAINERS                                   |   11 +
 drivers/staging/media/vxd/decoder/bspp.c      | 2479 ++++++++++++++
 drivers/staging/media/vxd/decoder/bspp.h      |  363 ++
 drivers/staging/media/vxd/decoder/bspp_int.h  |  514 +++
 .../media/vxd/decoder/h264_secure_parser.c    | 3051 +++++++++++++++++
 .../media/vxd/decoder/h264_secure_parser.h    |  278 ++
 .../media/vxd/decoder/hevc_secure_parser.c    | 2895 ++++++++++++++++
 .../media/vxd/decoder/hevc_secure_parser.h    |  455 +++
 .../media/vxd/decoder/jpeg_secure_parser.c    |  645 ++++
 .../media/vxd/decoder/jpeg_secure_parser.h    |   37 +
 drivers/staging/media/vxd/decoder/swsr.c      | 1657 +++++++++
 drivers/staging/media/vxd/decoder/swsr.h      |  278 ++
 12 files changed, 12663 insertions(+)
 create mode 100644 drivers/staging/media/vxd/decoder/bspp.c
 create mode 100644 drivers/staging/media/vxd/decoder/bspp.h
 create mode 100644 drivers/staging/media/vxd/decoder/bspp_int.h
 create mode 100644 drivers/staging/media/vxd/decoder/h264_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/h264_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/hevc_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/hevc_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/jpeg_secure_parser.c
 create mode 100644 drivers/staging/media/vxd/decoder/jpeg_secure_parser.h
 create mode 100644 drivers/staging/media/vxd/decoder/swsr.c
 create mode 100644 drivers/staging/media/vxd/decoder/swsr.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 6c3f7a55ce9b..baf1f19e21f7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -19560,9 +19560,20 @@ F:	drivers/staging/media/vxd/common/talmmu_api.c
 F:	drivers/staging/media/vxd/common/talmmu_api.h
 F:	drivers/staging/media/vxd/common/work_queue.c
 F:	drivers/staging/media/vxd/common/work_queue.h
+F:	drivers/staging/media/vxd/decoder/bspp.c
+F:	drivers/staging/media/vxd/decoder/bspp.h
+F:	drivers/staging/media/vxd/decoder/bspp_int.h
+F:	drivers/staging/media/vxd/decoder/h264_secure_parser.c
+F:	drivers/staging/media/vxd/decoder/h264_secure_parser.h
+F:	drivers/staging/media/vxd/decoder/hevc_secure_parser.c
+F:	drivers/staging/media/vxd/decoder/hevc_secure_parser.h
 F:	drivers/staging/media/vxd/decoder/hw_control.c
 F:	drivers/staging/media/vxd/decoder/hw_control.h
 F:	drivers/staging/media/vxd/decoder/img_dec_common.h
+F:	drivers/staging/media/vxd/decoder/jpeg_secure_parser.c
+F:	drivers/staging/media/vxd/decoder/jpeg_secure_parser.h
+F:	drivers/staging/media/vxd/decoder/swsr.c
+F:	drivers/staging/media/vxd/decoder/swsr.h
 F:	drivers/staging/media/vxd/decoder/translation_api.c
 F:	drivers/staging/media/vxd/decoder/translation_api.h
 F:	drivers/staging/media/vxd/decoder/vdec_mmu_wrapper.c
diff --git a/drivers/staging/media/vxd/decoder/bspp.c b/drivers/staging/media/vxd/decoder/bspp.c
new file mode 100644
index 000000000000..b3a03e99a2c2
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/bspp.c
@@ -0,0 +1,2479 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * VXD Bitstream Buffer Pre-Parser
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ *
+ * Re-written for upstream
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "h264_secure_parser.h"
+#include "hevc_secure_parser.h"
+#ifdef HAS_JPEG
+#include "jpeg_secure_parser.h"
+#endif
+#include "lst.h"
+#include "swsr.h"
+#include "vdecdd_defs.h"
+
+#define BSPP_ERR_MSG_LENGTH     1024
+
+/*
+ * This type defines the exception flags used to catch errors; if more catch
+ * blocks are required to catch different kinds of errors, more enum values
+ * can be added.
+ * @brief BSPP exception handler to catch the errors
+ */
+enum bspp_exception_handler {
+	/* BSPP parse exception handler */
+	BSPP_EXCEPTION_HANDLER_NONE = 0x00,
+	/* Jump at exception (external use) */
+	BSPP_EXCEPTION_HANDLER_JUMP,
+	BSPP_EXCEPTION_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * This structure contains bitstream buffer information.
+ * @brief  BSPP Bitstream Buffer Information
+ */
+struct bspp_bitstream_buffer {
+	void **lst_link;
+	struct bspp_ddbuf_info ddbuf_info;
+	unsigned int data_size;
+	unsigned int bufmap_id;
+	enum vdec_bstr_element_type bstr_element_type;
+	unsigned long long bytes_read;
+	void *pict_tag_param;
+};
+
+/*
+ * This structure contains shift-register state.
+ * @brief  BSPP Shift-register State
+ */
+struct bspp_parse_ctx {
+	void *swsr_context;
+	enum swsr_exception exception;
+};
+
+/*
+ * This structure contains context for the current picture.
+ * @brief  BSPP Picture Context
+ */
+struct bspp_pict_ctx {
+	struct bspp_sequence_hdr_info *sequ_hdr_info;
+	int closed_gop;
+	struct bspp_pict_hdr_info pict_hdr_info[VDEC_H264_MVC_MAX_VIEWS];
+	struct bspp_sequence_hdr_info *ext_sequ_hdr_info;
+	int present;
+	int invalid;
+	int unsupported;
+	int finished;
+	unsigned int new_pict_signalled;
+};
+
+/*
+ * This structure contains resources allocated for the stream.
+ * @brief  BSPP Stream Resource Allocations
+ */
+struct bspp_stream_alloc_data {
+	struct lst_t sequence_data_list[SEQUENCE_SLOTS];
+	struct lst_t pps_data_list[PPS_SLOTS];
+	struct lst_t available_sequence_list;
+	struct lst_t available_ppss_list;
+	struct lst_t raw_data_list_available;
+	struct lst_t raw_data_list_used;
+	struct lst_t vps_data_list[VPS_SLOTS];
+	struct lst_t raw_sei_alloc_list;
+	struct lst_t available_vps_list;
+};
+
+struct bspp_raw_sei_alloc {
+	void **lst_link;
+	struct vdec_raw_bstr_data raw_sei_data;
+};
+
+/*
+ * This structure contains bitstream parsing state information for the current
+ * group of buffers.
+ * @brief  BSPP Bitstream Parsing State Information
+ */
+struct bspp_grp_bstr_ctx {
+	enum vdec_vid_std vid_std;
+	int disable_mvc;
+	int delim_present;
+	void *swsr_context;
+	enum bspp_unit_type unit_type;
+	enum bspp_unit_type last_unit_type;
+	int not_pic_unit_yet;
+	int not_ext_pic_unit_yet;
+	unsigned int total_data_size;
+	unsigned int total_bytes_read;
+	struct lst_t buffer_chain;
+	struct lst_t in_flight_bufs;
+	struct lst_t *pre_pict_seg_list[3];
+	struct lst_t *pict_seg_list[3];
+	void **pict_tag_param_array[3];
+	struct lst_t *segment_list;
+	void **pict_tag_param;
+	struct lst_t *free_segments;
+	unsigned int segment_offset;
+	int insert_start_code;
+	unsigned char start_code_suffix;
+	unsigned char current_view_idx;
+};
+
+/*
+ * This structure contains the stream context information.
+ * @brief  BSPP Stream Context Information
+ */
+struct bspp_str_context {
+	enum vdec_vid_std vid_std;
+	int disable_mvc;
+	int full_scan;
+	int immediate_decode;
+	enum vdec_bstr_format bstr_format;
+	struct vdec_codec_config codec_config;
+	unsigned int user_str_id;
+	struct bspp_vid_std_features vid_std_features;
+	struct bspp_swsr_ctx swsr_ctx;
+	struct bspp_parser_callbacks parser_callbacks;
+	struct bspp_stream_alloc_data str_alloc;
+	unsigned int sequ_hdr_id;
+	unsigned char *sequ_hdr_info;
+	unsigned char *secure_sequence_info;
+	unsigned char *pps_info;
+	unsigned char *secure_pps_info;
+	unsigned char *raw_data;
+	struct bspp_grp_bstr_ctx grp_bstr_ctx;
+	struct bspp_parse_ctx parse_ctx;
+	struct bspp_inter_pict_data inter_pict_data;
+	struct lst_t decoded_pictures_list;
+	/* Mutex for secure access */
+	struct mutex *bspp_mutex;
+	int intra_frame_closed_gop;
+	struct bspp_pict_ctx pict_ctx;
+	struct bspp_parse_state parse_state;
+};
+
+/*
+ * This structure contains the standard related parser functions.
+ * @brief  BSPP Standard Related Functions
+ */
+struct bspp_parser_functions {
+	/* Pointer to standard-specific parser configuration function */
+	bspp_cb_set_parser_config set_parser_config;
+	/* Pointer to standard-specific unit type determining function */
+	bspp_cb_determine_unit_type determine_unit_type;
+};
+
+static struct bspp_parser_functions parser_fxns[VDEC_STD_MAX] = {
+	/* VDEC_STD_UNDEFINED */
+	{ NULL, NULL },
+	/* VDEC_STD_MPEG2 */
+	{ NULL, NULL },
+	/* VDEC_STD_MPEG4 */
+	{ NULL, NULL },
+	/* VDEC_STD_H263 */
+	{ NULL, NULL },
+	/* VDEC_STD_H264 */
+	{ bspp_h264_set_parser_config, bspp_h264_determine_unittype },
+	/* VDEC_STD_VC1 */
+	{ NULL, NULL },
+	/* VDEC_STD_AVS */
+	{ NULL, NULL },
+	/* VDEC_STD_REAL */
+	{ NULL, NULL },
+	/* VDEC_STD_JPEG */
+#ifdef HAS_JPEG
+	{ bspp_jpeg_setparser_config, bspp_jpeg_determine_unit_type },
+#else
+	{ NULL, NULL },
+#endif
+	/* VDEC_STD_VP6 */
+	{ NULL, NULL },
+	/* VDEC_STD_VP8 */
+	{ NULL, NULL },
+	/* VDEC_STD_SORENSON */
+	{ NULL, NULL },
+	/* VDEC_STD_HEVC */
+	{ bspp_hevc_set_parser_config, bspp_hevc_determine_unittype },
+};
+
+/*
+ * @Function	bspp_get_pps_hdr
+ * @Description	Obtains the most recent PPS header of a given Id.
+ */
+struct bspp_pps_info *bspp_get_pps_hdr(void *str_res_handle, unsigned int pps_id)
+{
+	struct bspp_stream_alloc_data *alloc_data =
+		(struct bspp_stream_alloc_data *)str_res_handle;
+
+	if (pps_id >= PPS_SLOTS || !alloc_data)
+		return NULL;
+
+	return lst_last(&alloc_data->pps_data_list[pps_id]);
+}
+
+/*
+ * @Function	bspp_get_sequ_hdr
+ * @Description	Obtains the most recent sequence header of a given Id.
+ */
+struct bspp_sequence_hdr_info *bspp_get_sequ_hdr(void *str_res_handle,
+						 unsigned int sequ_id)
+{
+	struct bspp_stream_alloc_data *alloc_data =
+		(struct bspp_stream_alloc_data *)str_res_handle;
+
+	if (sequ_id >= SEQUENCE_SLOTS || !alloc_data)
+		return NULL;
+
+	return lst_last(&alloc_data->sequence_data_list[sequ_id]);
+}
+
+/*
+ * @Function	bspp_free_bitstream_elem
+ * @Description	Frees a bitstream chain element.
+ */
+static void bspp_free_bitstream_elem(struct bspp_bitstream_buffer *bstr_buf)
+{
+	memset(bstr_buf, 0, sizeof(struct bspp_bitstream_buffer));
+
+	kfree(bstr_buf);
+}
+
+/*
+ * @Function	bspp_create_segment
+ * @Description Constructs a bitstream segment for the current unit and adds
+ *		it to the list.
+ */
+static int bspp_create_segment(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+			       struct bspp_bitstream_buffer *cur_buf)
+{
+	struct bspp_bitstr_seg *segment;
+	unsigned int result;
+
+	/*
+	 * Only create a segment when data (not in a previous segment) has been
+	 * parsed from the buffer.
+	 */
+	if (cur_buf->bytes_read != grp_btsr_ctx->segment_offset) {
+		/* Take a segment structure from the free list. */
+		segment = lst_removehead(grp_btsr_ctx->free_segments);
+		if (!segment) {
+			result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+			goto error;
+		}
+		memset(segment, 0, sizeof(struct bspp_bitstr_seg));
+
+		segment->bufmap_id = cur_buf->bufmap_id;
+		segment->data_size = (unsigned int)cur_buf->bytes_read
+			- grp_btsr_ctx->segment_offset;
+		segment->data_byte_offset = grp_btsr_ctx->segment_offset;
+
+		if (cur_buf->bytes_read == cur_buf->data_size) {
+			/* This is the last segment in the buffer. */
+			segment->bstr_seg_flag |= VDECDD_BSSEG_LASTINBUFF;
+		}
+
+		/*
+		 * Next segment will start part way through the buffer
+		 * (current read position).
+		 */
+		grp_btsr_ctx->segment_offset = (unsigned int)cur_buf->bytes_read;
+
+		if (grp_btsr_ctx->insert_start_code) {
+			segment->bstr_seg_flag |= VDECDD_BSSEG_INSERT_STARTCODE;
+			segment->start_code_suffix = grp_btsr_ctx->start_code_suffix;
+			grp_btsr_ctx->insert_start_code = 0;
+		}
+
+		lst_add(grp_btsr_ctx->segment_list, segment);
+
+		/*
+		 * If multiple segments correspond to the same (picture)
+		 * stream-unit, update it only the first time
+		 */
+		if (cur_buf->pict_tag_param && grp_btsr_ctx->pict_tag_param &&
+		    (grp_btsr_ctx->segment_list ==
+		     grp_btsr_ctx->pict_seg_list[0] ||
+		     grp_btsr_ctx->segment_list ==
+		     grp_btsr_ctx->pict_seg_list[1] ||
+		     grp_btsr_ctx->segment_list ==
+		     grp_btsr_ctx->pict_seg_list[2]))
+			*grp_btsr_ctx->pict_tag_param = cur_buf->pict_tag_param;
+	}
+
+	return IMG_SUCCESS;
+error:
+	return result;
+}
+
+/*
+ * @Function bspp_determine_unit_type
+ *
+ */
+static int bspp_determine_unit_type(enum vdec_vid_std vid_std,
+				    unsigned char unit_type,
+				    int disable_mvc,
+				    enum bspp_unit_type *unit_type_enum)
+{
+	/* Determine the unit type from the NAL type. */
+	if (vid_std < VDEC_STD_MAX && parser_fxns[vid_std].determine_unit_type)
+		parser_fxns[vid_std].determine_unit_type(unit_type, disable_mvc, unit_type_enum);
+	else
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	bspp_shift_reg_cb
+ *
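+ * @Description
+ * Shift-register callback: supplies the next buffer in the chain to the
+ * shift-register, closes segments at buffer boundaries and switches
+ * segment lists on unit delimiters.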
+ */
+static void bspp_shift_reg_cb(enum swsr_cbevent event,
+			      struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+			      unsigned char nal_type,
+			      unsigned char **data_buffer,
+			      unsigned long long *data_size)
+{
+	unsigned int result;
+
+	switch (event) {
+	case SWSR_EVENT_INPUT_BUFFER_START: {
+		struct bspp_bitstream_buffer *next_buf;
+
+		/* Take the next bitstream buffer for use in shift-register. */
+		next_buf = lst_removehead(&grp_btsr_ctx->buffer_chain);
+
+		if (next_buf && data_buffer && data_size) {
+			lst_add(&grp_btsr_ctx->in_flight_bufs, next_buf);
+
+			*data_buffer = next_buf->ddbuf_info.cpu_virt_addr;
+			*data_size = next_buf->data_size;
+
+			next_buf->bytes_read = 0;
+		} else {
+			goto error;
+		}
+	}
+	break;
+	case SWSR_EVENT_OUTPUT_BUFFER_END: {
+		struct bspp_bitstream_buffer *cur_buf;
+
+		cur_buf = lst_removehead(&grp_btsr_ctx->in_flight_bufs);
+
+		if (cur_buf) {
+			/*
+			 * Indicate that the whole buffer content has been
+			 * used.
+			 */
+			cur_buf->bytes_read = cur_buf->data_size;
+			grp_btsr_ctx->total_bytes_read += (unsigned int)cur_buf->bytes_read;
+
+			/*
+			 * Construct segment for current buffer and add to
+			 * active list.
+			 */
+			result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+			if (result != IMG_SUCCESS)
+				goto error;
+
+			/*
+			 * Next segment will start at the beginning of the next
+			 * buffer.
+			 */
+			grp_btsr_ctx->segment_offset = 0;
+
+			/* Destroy the bitstream element. */
+			bspp_free_bitstream_elem(cur_buf);
+		} else {
+			goto error;
+		}
+	}
+	break;
+
+	case SWSR_EVENT_DELIMITER_NAL_TYPE:
+		/*
+		 * Initialise the unit type with the last one seen (unclassified
+		 * or unsupported types are not retained since they do not cause
+		 * a switch of segment list).
+		 */
+		grp_btsr_ctx->unit_type = grp_btsr_ctx->last_unit_type;
+
+		/*
+		 * Determine the unit type without consuming any data (start
+		 * code) from shift-register. Segments are created automatically
+		 * when a new buffer is requested by the shift-register so the
+		 * unit type must be known in order to switch over the segment
+		 * list.
+		 */
+		result = bspp_determine_unit_type(grp_btsr_ctx->vid_std, nal_type,
+						  grp_btsr_ctx->disable_mvc,
+						  &grp_btsr_ctx->unit_type);
+
+		/*
+		 * Only look to change bitstream segment list when the unit type
+		 * is different and the current unit contains data that could be
+		 * placed in a new list.
+		 */
+		if (grp_btsr_ctx->last_unit_type != grp_btsr_ctx->unit_type &&
+		    grp_btsr_ctx->unit_type != BSPP_UNIT_UNSUPPORTED &&
+		    grp_btsr_ctx->unit_type != BSPP_UNIT_UNCLASSIFIED) {
+			int prev_pict_data;
+			int curr_pict_data;
+
+			prev_pict_data = (grp_btsr_ctx->last_unit_type == BSPP_UNIT_PICTURE ||
+					  grp_btsr_ctx->last_unit_type ==
+					  BSPP_UNIT_SKIP_PICTURE) ? 1 : 0;
+
+			curr_pict_data = (grp_btsr_ctx->unit_type == BSPP_UNIT_PICTURE ||
+					  grp_btsr_ctx->unit_type ==
+					  BSPP_UNIT_SKIP_PICTURE) ? 1 : 0;
+
+			/*
+			 * When switching between picture and non-picture
+			 * units.
+			 */
+			if ((prev_pict_data && !curr_pict_data) ||
+			    (!prev_pict_data && curr_pict_data)) {
+				/*
+				 * Only delimit unit change when we're not the
+				 * first unit and we're not already in the last
+				 * segment list.
+				 */
+				if (grp_btsr_ctx->last_unit_type != BSPP_UNIT_NONE &&
+				    grp_btsr_ctx->segment_list !=
+				    grp_btsr_ctx->pict_seg_list[2]) {
+					struct bspp_bitstream_buffer *cur_buf =
+						lst_first(&grp_btsr_ctx->in_flight_bufs);
+					if (!cur_buf)
+						goto error;
+
+					/*
+					 * Update the offset within current buf.
+					 */
+					swsr_get_byte_offset_curbuf(grp_btsr_ctx->swsr_context,
+								    &cur_buf->bytes_read);
+
+					/*
+					 * Create the last segment of the
+					 * previous type (which may split a
+					 * buffer into two). If the unit is
+					 * exactly at the start of a buffer this
+					 * will not create a zero-byte segment.
+					 */
+					result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+					if (result != IMG_SUCCESS)
+						goto error;
+				}
+
+				/* Point at the next segment list. */
+				if (grp_btsr_ctx->segment_list
+					== grp_btsr_ctx->pre_pict_seg_list[0]) {
+					grp_btsr_ctx->segment_list =
+						grp_btsr_ctx->pict_seg_list[0];
+					grp_btsr_ctx->pict_tag_param =
+						grp_btsr_ctx->pict_tag_param_array[0];
+				} else if (grp_btsr_ctx->segment_list
+					== grp_btsr_ctx->pict_seg_list[0])
+					grp_btsr_ctx->segment_list =
+						grp_btsr_ctx->pre_pict_seg_list[1];
+				else if (grp_btsr_ctx->segment_list
+					== grp_btsr_ctx->pre_pict_seg_list[1]) {
+					grp_btsr_ctx->segment_list =
+						grp_btsr_ctx->pict_seg_list[1];
+					grp_btsr_ctx->pict_tag_param =
+						grp_btsr_ctx->pict_tag_param_array[1];
+				} else if (grp_btsr_ctx->segment_list
+					== grp_btsr_ctx->pict_seg_list[1])
+					grp_btsr_ctx->segment_list =
+						grp_btsr_ctx->pre_pict_seg_list[2];
+				else if (grp_btsr_ctx->segment_list
+					== grp_btsr_ctx->pre_pict_seg_list[2]) {
+					grp_btsr_ctx->segment_list =
+						grp_btsr_ctx->pict_seg_list[2];
+					grp_btsr_ctx->pict_tag_param =
+						grp_btsr_ctx->pict_tag_param_array[2];
+				}
+			}
+
+			grp_btsr_ctx->last_unit_type = grp_btsr_ctx->unit_type;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+error:
+	return;
+}
+
+/*
+ * @Function	bspp_exception_handler
+ *
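+ * @Description
+ * Records the shift-register exception in the parse context and then
+ * clears it in the shift-register.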
+ */
+static void bspp_exception_handler(enum swsr_exception exception, void *parse_ctx_handle)
+{
+	struct bspp_parse_ctx *parse_ctx = (struct bspp_parse_ctx *)parse_ctx_handle;
+
+	/* Store the exception. */
+	parse_ctx->exception = exception;
+
+	switch (parse_ctx->exception) {
+	case SWSR_EXCEPT_NO_EXCEPTION:
+		break;
+	case SWSR_EXCEPT_ENCAPULATION_ERROR1:
+		break;
+	case SWSR_EXCEPT_ENCAPULATION_ERROR2:
+		break;
+	case SWSR_EXCEPT_ACCESS_INTO_SCP:
+		break;
+	case SWSR_EXCEPT_ACCESS_BEYOND_EOD:
+		break;
+	case SWSR_EXCEPT_EXPGOULOMB_ERROR:
+		break;
+	case SWSR_EXCEPT_WRONG_CODEWORD_ERROR:
+		break;
+	case SWSR_EXCEPT_NO_SCP:
+		break;
+	case SWSR_EXCEPT_INVALID_CONTEXT:
+		break;
+
+	default:
+		break;
+	}
+
+	/* Clear the exception. */
+	swsr_check_exception(parse_ctx->swsr_context);
+}
+
+/*
+ * @Function	bspp_reset_sequence
+ *
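+ * @Description
+ * Clears a sequence header entry (including its firmware buffer
+ * contents) while preserving the buffer info and secure handle so the
+ * entry can be reused.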
+ */
+static void bspp_reset_sequence(struct bspp_str_context *str_ctx,
+				struct bspp_sequence_hdr_info *sequ_hdr_info)
+{
+	/* Temporarily store relevant sequence fields. */
+	struct bspp_ddbuf_array_info aux_fw_sequence = sequ_hdr_info->fw_sequence;
+	void *aux_secure_sequence_info_hndl = sequ_hdr_info->secure_sequence_info;
+
+	struct bspp_ddbuf_array_info *tmp = &sequ_hdr_info->fw_sequence;
+
+	/* Reset all related structures. */
+	memset(((unsigned char *)tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset), 0x00,
+	       sequ_hdr_info->fw_sequence.buf_element_size);
+
+	if (str_ctx->parser_callbacks.reset_data_cb)
+		str_ctx->parser_callbacks.reset_data_cb(BSPP_UNIT_SEQUENCE,
+			sequ_hdr_info->secure_sequence_info);
+	else
+		memset(aux_secure_sequence_info_hndl, 0, str_ctx->vid_std_features.seq_size);
+
+	memset(sequ_hdr_info, 0, sizeof(*sequ_hdr_info));
+
+	/* Restore relevant sequence fields. */
+	sequ_hdr_info->fw_sequence = aux_fw_sequence;
+	sequ_hdr_info->sequ_hdr_info.bufmap_id = aux_fw_sequence.ddbuf_info.bufmap_id;
+	sequ_hdr_info->sequ_hdr_info.buf_offset = aux_fw_sequence.buf_offset;
+	sequ_hdr_info->secure_sequence_info = aux_secure_sequence_info_hndl;
+}
+
+/*
+ * @Function	bspp_reset_pps
+ *
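+ * @Description
+ * Clears a PPS entry while preserving its buffer info and secure handle
+ * so the entry can be reused.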
+ */
+static void bspp_reset_pps(struct bspp_str_context *str_ctx,
+			   struct bspp_pps_info *pps_info)
+{
+	/* Temporarily store relevant PPS fields. */
+	struct bspp_ddbuf_array_info aux_fw_pps = pps_info->fw_pps;
+	void *aux_secure_pps_info_hndl = pps_info->secure_pps_info;
+	struct bspp_ddbuf_array_info *tmp = &pps_info->fw_pps;
+
+	/* Reset all related structures. */
+	memset(((unsigned char *)tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset), 0x00,
+	       pps_info->fw_pps.buf_element_size);
+
+	/* Reset the parser specific data. */
+	if (str_ctx->parser_callbacks.reset_data_cb)
+		str_ctx->parser_callbacks.reset_data_cb(BSPP_UNIT_PPS, pps_info->secure_pps_info);
+
+	/* Reset the common data. */
+	memset(pps_info, 0, sizeof(*pps_info));
+
+	/* Restore relevant PPS fields. */
+	pps_info->fw_pps = aux_fw_pps;
+	pps_info->bufmap_id = aux_fw_pps.ddbuf_info.bufmap_id;
+	pps_info->buf_offset = aux_fw_pps.buf_offset;
+	pps_info->secure_pps_info = aux_secure_pps_info_hndl;
+}
+
+/*
+ * @Function	bspp_stream_submit_buffer
+ *
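+ * @Description
+ * Validates a bitstream buffer and queues it on the group buffer chain
+ * for pre-parsing; units may span several buffers.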
+ */
+int bspp_stream_submit_buffer(void *str_context_handle,
+			      const struct bspp_ddbuf_info *ddbuf_info,
+			      unsigned int bufmap_id,
+			      unsigned int data_size,
+			      void *pict_tag_param,
+			      enum vdec_bstr_element_type bstr_element_type)
+{
+	struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+	struct bspp_bitstream_buffer *bstr_buf;
+	unsigned int result = IMG_SUCCESS;
+
+	if (!str_context_handle) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	if (bstr_element_type == VDEC_BSTRELEMENT_UNDEFINED ||
+	    bstr_element_type >= VDEC_BSTRELEMENT_MAX) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	/*
+	 * Check that the new bitstream buffer is compatible with those
+	 * before.
+	 */
+	bstr_buf = lst_last(&str_ctx->grp_bstr_ctx.buffer_chain);
+	if (bstr_buf && bstr_buf->bstr_element_type != bstr_element_type) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	/* Allocate a bitstream buffer chain element structure */
+	bstr_buf = kzalloc(sizeof(*bstr_buf), GFP_KERNEL);
+	if (!bstr_buf) {
+		result = IMG_ERROR_OUT_OF_MEMORY;
+		goto error;
+	}
+
+	/* Queue buffer in a chain since units might span buffers. */
+	if (ddbuf_info)
+		bstr_buf->ddbuf_info = *ddbuf_info;
+
+	bstr_buf->data_size = data_size;
+	bstr_buf->bstr_element_type = bstr_element_type;
+	bstr_buf->pict_tag_param = pict_tag_param;
+	bstr_buf->bufmap_id = bufmap_id;
+	lst_add(&str_ctx->grp_bstr_ctx.buffer_chain, bstr_buf);
+
+	str_ctx->grp_bstr_ctx.total_data_size += data_size;
+
+error:
+	return result;
+}
+
+/*
+ * @Function	bspp_obtain_sequence_hdr
+ *
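+ * @Description
+ * Obtains any partially filled sequence data, otherwise takes a fresh
+ * entry from the available pool (always fresh for H.264 and HEVC).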
+ */
+static struct bspp_sequence_hdr_info *bspp_obtain_sequence_hdr(struct bspp_str_context *str_ctx)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+	struct bspp_sequence_hdr_info *sequ_hdr_info;
+
+	/*
+	 * Obtain any partially filled sequence data, else provide a new one
+	 * (always new for H.264 and HEVC).
+	 */
+	sequ_hdr_info = lst_last(&str_alloc->sequence_data_list[BSPP_DEFAULT_SEQUENCE_ID]);
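+	/*
+	 * Reuse the tail entry only while it is still being filled and is
+	 * unreferenced; H.264/HEVC sequence headers are self-contained, so
+	 * they always take a fresh resource from the pool.
+	 */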
+	if (!sequ_hdr_info || sequ_hdr_info->ref_count > 0 || str_ctx->vid_std == VDEC_STD_H264 ||
+	    str_ctx->vid_std == VDEC_STD_HEVC) {
+		/* Get Sequence resource. */
+		sequ_hdr_info = lst_removehead(&str_alloc->available_sequence_list);
+		if (sequ_hdr_info) {
+			bspp_reset_sequence(str_ctx, sequ_hdr_info);
+			sequ_hdr_info->sequ_hdr_info.sequ_hdr_id = BSPP_INVALID;
+		}
+	}
+
+	return sequ_hdr_info;
+}
+
+/*
+ * @Function	bspp_submit_picture_decoded
+ *
+ */
+int bspp_submit_picture_decoded(void *str_context_handle,
+				struct bspp_picture_decoded *picture_decoded)
+{
+	struct bspp_picture_decoded *picture_decoded_elem;
+	struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+
+	/* Validate input arguments. */
+	if (!str_context_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	picture_decoded_elem = kmalloc(sizeof(*picture_decoded_elem), GFP_KERNEL);
+	if (!picture_decoded_elem)
+		return IMG_ERROR_MALLOC_FAILED;
+
+	*picture_decoded_elem = *picture_decoded;
+
+	/* Lock access to the list for adding a picture - HIGH PRIORITY */
+	mutex_lock_nested(str_ctx->bspp_mutex, SUBCLASS_BSPP);
+
+	lst_add(&str_ctx->decoded_pictures_list, picture_decoded_elem);
+
+	/* Unlock access to the list for adding a picture - HIGH PRIORITY */
+	mutex_unlock(str_ctx->bspp_mutex);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	bspp_check_and_detach_pps_info
+ *
+ */
+static void bspp_check_and_detach_pps_info(struct bspp_stream_alloc_data *str_alloc,
+					   unsigned int pps_id)
+{
+	if (pps_id != BSPP_INVALID) {
+		struct bspp_pps_info *pps_info = lst_first(&str_alloc->pps_data_list[pps_id]);
+
+		if (!pps_info) /* Invalid id */
+			return;
+
+		pps_info->ref_count--;
+		/* If nothing references it any more */
+		if (pps_info->ref_count == 0) {
+			struct bspp_pps_info *next_pps_info = lst_next(pps_info);
+
+			/*
+			 * If it is not the last PPS in the slot list,
+			 * remove it and return it to the pool-list.
+			 */
+			if (next_pps_info) {
+				lst_remove(&str_alloc->pps_data_list[pps_id], pps_info);
+				lst_addhead(&str_alloc->available_ppss_list, pps_info);
+			}
+		}
+	}
+}
+
+/*
+ * @Function	bspp_picture_decoded
+ *
+ */
+static int bspp_picture_decoded(struct bspp_str_context *str_ctx,
+				struct bspp_picture_decoded *picture_decoded)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+	/* Manage Sequence */
+	if (picture_decoded->sequ_hdr_id != BSPP_INVALID) {
+		struct bspp_sequence_hdr_info *seq =
+			lst_first(&str_alloc->sequence_data_list[picture_decoded->sequ_hdr_id]);
+
+		if (!seq)
+			return IMG_ERROR_INVALID_ID;
+
+		if (picture_decoded->not_decoded) {
+			/* Release sequence data. */
+			if (str_ctx->parser_callbacks.release_data_cb)
+				str_ctx->parser_callbacks.release_data_cb((void *)str_alloc,
+					BSPP_UNIT_SEQUENCE, seq->secure_sequence_info);
+		}
+
+		seq->ref_count--;
+		/* If nothing references it any more */
+		if (seq->ref_count == 0) {
+			struct bspp_sequence_hdr_info *next_sequ_hdr_info = lst_next(seq);
+
+			/*
+			 * If it is not the last sequence in the slot list
+			 * remove it and return it to the pool-list
+			 */
+			if (next_sequ_hdr_info) {
+				lst_remove(&str_alloc->sequence_data_list
+					   [picture_decoded->sequ_hdr_id], seq);
+				/* Release sequence data. */
+				if (str_ctx->parser_callbacks.release_data_cb)
+					str_ctx->parser_callbacks.release_data_cb((void *)str_alloc,
+						BSPP_UNIT_SEQUENCE, seq->secure_sequence_info);
+
+				lst_addhead(&str_alloc->available_sequence_list, seq);
+			}
+		}
+	}
+
+	/*
+	 * Expect at least one valid PPS for H.264; the PPS IDs are always
+	 * invalid for all other standards.
+	 */
+	bspp_check_and_detach_pps_info(str_alloc, picture_decoded->pps_id);
+	bspp_check_and_detach_pps_info(str_alloc, picture_decoded->second_pps_id);
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	bspp_service_pictures_decoded
+ *
+ */
+static int bspp_service_pictures_decoded(struct bspp_str_context *str_ctx)
+{
+	struct bspp_picture_decoded *picture_decoded;
+
+	while (1) {
+		/*
+		 * Lock access to the list for removing a picture -
+		 * LOW PRIORITY
+		 */
+		mutex_lock_nested(str_ctx->bspp_mutex, SUBCLASS_BSPP);
+
+		picture_decoded = lst_removehead(&str_ctx->decoded_pictures_list);
+
+		/*
+		 * Unlock access to the list for removing a picture -
+		 * LOW PRIORITY
+		 */
+		mutex_unlock(str_ctx->bspp_mutex);
+
+		if (!picture_decoded)
+			break;
+
+		bspp_picture_decoded(str_ctx, picture_decoded);
+		kfree(picture_decoded);
+	}
+
+	return IMG_SUCCESS;
+}
+
+static void bspp_remove_unused_vps(struct bspp_str_context *str_ctx, unsigned int vps_id)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+	struct bspp_vps_info *temp_vps_info = NULL;
+	struct bspp_vps_info *next_temp_vps_info = NULL;
+
+	/*
+	 * Check the whole VPS slot list for any unused VPSs BEFORE ADDING
+	 * THE NEW ONE; if found, remove them.
+	 */
+	next_temp_vps_info = lst_first(&str_alloc->vps_data_list[vps_id]);
+	while (next_temp_vps_info) {
+		/* Set Temp, it is the one which we will potentially remove */
+		temp_vps_info = next_temp_vps_info;
+		/*
+		 *  Set Next Temp, it is the one for the next iteration
+		 * (we cannot ask for next after removing it)
+		 */
+		next_temp_vps_info = lst_next(temp_vps_info);
+		/* If it is not used remove it */
+		if (temp_vps_info->ref_count == 0 && next_temp_vps_info) {
+			/* Return resource to the available pool */
+			lst_remove(&str_alloc->vps_data_list[vps_id], temp_vps_info);
+			lst_addhead(&str_alloc->available_vps_list, temp_vps_info);
+		}
+	}
+}
+
+static void bspp_remove_unused_pps(struct bspp_str_context *str_ctx, unsigned int pps_id)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+	struct bspp_pps_info *temp_pps_info = NULL;
+	struct bspp_pps_info *next_temp_pps_info = NULL;
+
+	/*
+	 * Check the whole PPS slot list for any unused PPSs BEFORE ADDING
+	 * THE NEW ONE, if found remove them
+	 */
+	next_temp_pps_info = lst_first(&str_alloc->pps_data_list[pps_id]);
+	while (next_temp_pps_info) {
+		/* Set Temp, it is the one which we will potentially remove */
+		temp_pps_info = next_temp_pps_info;
+		/*
+		 * Set Next Temp, it is the one for the next iteration
+		 * (we cannot ask for next after removing it)
+		 */
+		next_temp_pps_info = lst_next(temp_pps_info);
+		/* If it is not used remove it */
+		if (temp_pps_info->ref_count == 0 && next_temp_pps_info) {
+			/* Return resource to the available pool */
+			lst_remove(&str_alloc->pps_data_list[pps_id], temp_pps_info);
+			lst_addhead(&str_alloc->available_ppss_list, temp_pps_info);
+		}
+	}
+}
+
+static void bspp_remove_unused_sequence(struct bspp_str_context *str_ctx, unsigned int sps_id)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+	struct bspp_sequence_hdr_info *seq = NULL;
+	struct bspp_sequence_hdr_info *next_seq = NULL;
+
+	/*
+	 * Check the whole sequence slot list for any unused sequences,
+	 * if found remove them
+	 */
+	next_seq = lst_first(&str_alloc->sequence_data_list[sps_id]);
+	while (next_seq) {
+		/* Set Temp, it is the one which we will potentially remove */
+		seq = next_seq;
+		/*
+		 * Set Next Temp, it is the one for the next iteration (we
+		 * cannot ask for next after removing it)
+		 */
+		next_seq = lst_next(seq);
+
+		/*
+		 * If the head is no longer used and there is something after,
+		 * remove it
+		 */
+		if (seq->ref_count == 0 && next_seq) {
+			/* Return resource to the pool-list */
+			lst_remove(&str_alloc->sequence_data_list[sps_id], seq);
+			if (str_ctx->parser_callbacks.release_data_cb) {
+				str_ctx->parser_callbacks.release_data_cb
+							((void *)str_alloc,
+							 BSPP_UNIT_SEQUENCE,
+							 seq->secure_sequence_info);
+			}
+			lst_addhead(&str_alloc->available_sequence_list, seq);
+		}
+	}
+}
+
+/*
+ * @Function	bspp_return_or_store_sequence_hdr
+ *
+ */
+static int bspp_return_or_store_sequence_hdr(struct bspp_str_context *str_ctx,
+					     enum bspp_error_type parse_error,
+					     struct bspp_sequence_hdr_info *sequ_hdr_info)
+{
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+	struct bspp_sequence_hdr_info *prev_sequ_hdr_info;
+
+	if (((parse_error & BSPP_ERROR_UNRECOVERABLE) || (parse_error & BSPP_ERROR_UNSUPPORTED)) &&
+	    sequ_hdr_info->sequ_hdr_info.sequ_hdr_id != BSPP_INVALID) {
+		prev_sequ_hdr_info =
+			lst_last(&str_alloc->sequence_data_list
+					[sequ_hdr_info->sequ_hdr_info.sequ_hdr_id]);
+
+		/* check if it's not the same pointer */
+		if (prev_sequ_hdr_info && prev_sequ_hdr_info != sequ_hdr_info) {
+			/*
+			 * Throw away corrupted sequence header if a previous "good" one exists.
+			 */
+			sequ_hdr_info->sequ_hdr_info.sequ_hdr_id = BSPP_INVALID;
+		}
+	}
+
+	/* Store or return Sequence resource. */
+	if (sequ_hdr_info->sequ_hdr_info.sequ_hdr_id != BSPP_INVALID) {
+		/* Only add when not already in list. */
+		if (sequ_hdr_info != lst_last(&str_alloc->sequence_data_list
+				[sequ_hdr_info->sequ_hdr_info.sequ_hdr_id])) {
+			/*
+			 * Add new sequence header (not already in list) to end
+			 * of the slot-list.
+			 */
+			lst_add(&str_alloc->sequence_data_list
+				[sequ_hdr_info->sequ_hdr_info.sequ_hdr_id], sequ_hdr_info);
+		}
+
+		bspp_remove_unused_sequence(str_ctx, sequ_hdr_info->sequ_hdr_info.sequ_hdr_id);
+	} else {
+		/*
+		 * If the unit was not a sequence info, add the resource to
+		 * the pool-list.
+		 */
+		lst_addhead(&str_alloc->available_sequence_list, sequ_hdr_info);
+	}
+
+	return IMG_SUCCESS;
+}
+
+/*
+ * @Function	bspp_get_resource
+ *
+ */
+static int bspp_get_resource(struct bspp_str_context *str_ctx,
+			     struct bspp_pict_hdr_info *pict_hdr_info,
+			     struct bspp_unit_data *unit_data)
+{
+	int result = IMG_SUCCESS;
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+	switch (unit_data->unit_type) {
+	case BSPP_UNIT_VPS:
+		/* Get VPS resource (HEVC only). */
+		if (unit_data->vid_std != VDEC_STD_HEVC)
+			break;
+		unit_data->out.vps_info = lst_removehead(&str_alloc->available_vps_list);
+		if (!unit_data->out.vps_info) {
+			result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+		} else {
+			unit_data->out.vps_info->vps_id = BSPP_INVALID;
+			unit_data->out.vps_info->ref_count = 0;
+		}
+		break;
+	case BSPP_UNIT_SEQUENCE:
+		unit_data->out.sequ_hdr_info = bspp_obtain_sequence_hdr(str_ctx);
+		if (!unit_data->out.sequ_hdr_info)
+			result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+
+		break;
+
+	case BSPP_UNIT_PPS:
+		/* Get PPS resource (H.264 only). */
+		unit_data->out.pps_info = lst_removehead(&str_alloc->available_ppss_list);
+		/* allocate and return extra resources */
+		if (!unit_data->out.pps_info) {
+			result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+		} else {
+			bspp_reset_pps(str_ctx, unit_data->out.pps_info);
+			unit_data->out.pps_info->pps_id = BSPP_INVALID;
+		}
+		break;
+
+	case BSPP_UNIT_PICTURE:
+	case BSPP_UNIT_SKIP_PICTURE:
+		unit_data->out.pict_hdr_info = pict_hdr_info;
+#ifdef HAS_JPEG
+		if (unit_data->vid_std == VDEC_STD_JPEG) {
+			unit_data->impl_sequ_hdr_info = bspp_obtain_sequence_hdr(str_ctx);
+			if (!unit_data->impl_sequ_hdr_info)
+				result = IMG_ERROR_COULD_NOT_OBTAIN_RESOURCE;
+		}
+#endif
+		break;
+
+	default:
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * @Function	bspp_file_resource
+ * @Description	Stores or returns all resources provided to parse unit.
+ */
+static int bspp_file_resource(struct bspp_str_context *str_ctx, struct bspp_unit_data *unit_data)
+{
+	unsigned int result = IMG_SUCCESS;
+	struct bspp_stream_alloc_data *str_alloc = &str_ctx->str_alloc;
+
+	switch (unit_data->unit_type) {
+	case BSPP_UNIT_VPS:
+		/* Store or return VPS resource (HEVC only) */
+		if (unit_data->vid_std != VDEC_STD_HEVC)
+			break;
+
+		if (unit_data->out.vps_info->vps_id != BSPP_INVALID) {
+			lst_add(&str_alloc->vps_data_list[unit_data->out.vps_info->vps_id],
+				unit_data->out.vps_info);
+
+			bspp_remove_unused_vps(str_ctx, unit_data->out.vps_info->vps_id);
+		} else {
+			lst_addhead(&str_alloc->available_vps_list, unit_data->out.vps_info);
+		}
+		break;
+	case BSPP_UNIT_SEQUENCE:
+		result = bspp_return_or_store_sequence_hdr(str_ctx, unit_data->parse_error,
+							   unit_data->out.sequ_hdr_info);
+		VDEC_ASSERT(result == IMG_SUCCESS);
+		break;
+
+	case BSPP_UNIT_PPS:
+		/* Store or return PPS resource (H.264 only). */
+		if (unit_data->out.pps_info->pps_id != BSPP_INVALID) {
+			/*
+			 * If the unit was a PPS info, add the resource to the
+			 * slot-list AFTER REMOVING THE UNUSED ONES, otherwise
+			 * it would be removed along with the rest unless a
+			 * special provision for the last one were made.
+			 */
+			lst_add(&str_alloc->pps_data_list[unit_data->out.pps_info->pps_id],
+				unit_data->out.pps_info);
+
+			bspp_remove_unused_pps(str_ctx, unit_data->out.pps_info->pps_id);
+		} else {
+			/*
+			 * if unit was not a PPS info, add resource to the
+			 * pool-list
+			 */
+			lst_addhead(&str_alloc->available_ppss_list, unit_data->out.pps_info);
+		}
+		break;
+
+	case BSPP_UNIT_PICTURE:
+	case BSPP_UNIT_SKIP_PICTURE:
+#ifdef HAS_JPEG
+		if (unit_data->vid_std == VDEC_STD_JPEG) {
+			result = bspp_return_or_store_sequence_hdr(str_ctx,
+								   unit_data->parse_error,
+								   unit_data->impl_sequ_hdr_info);
+			VDEC_ASSERT(result == IMG_SUCCESS);
+		}
+#endif
+		break;
+
+	default:
+		break;
+	}
+
+	return result;
+}
+
+/*
+ * @Function	bspp_process_unit
+ *
+ */
+static int bspp_process_unit(struct bspp_str_context *str_ctx,
+			     unsigned int size_delim_bits,
+			     struct bspp_pict_ctx *pict_ctx,
+			     struct bspp_parse_state *parse_state)
+{
+	struct bspp_unit_data unit_data;
+	unsigned long long unit_size = 0; /* Unit size (in bytes, size delimited only). */
+	unsigned int result;
+	unsigned char vidx = str_ctx->grp_bstr_ctx.current_view_idx;
+	struct bspp_pict_hdr_info *curr_pict_hdr_info;
+
+	/*
+	 * Set up default unit data. For size-delimited streams, unit_size is
+	 * filled in by the call to swsr_consume_delim() below.
+	 */
+	memset(&unit_data, 0, sizeof(struct bspp_unit_data));
+
+	if (str_ctx->grp_bstr_ctx.delim_present) {
+		/* Consume delimiter and catch any exceptions. */
+		/*
+		 * Consume the bitstream unit delimiter (size or
+		 * start code prefix).
+		 * When size-delimited the unit size is also returned
+		 * so that the next unit can be found.
+		 */
+		result = swsr_consume_delim(str_ctx->swsr_ctx.swsr_context,
+					    str_ctx->swsr_ctx.emulation_prevention,
+					    size_delim_bits, &unit_size);
+		if (result != IMG_SUCCESS)
+			goto error;
+	}
+
+	unit_data.unit_type = str_ctx->grp_bstr_ctx.unit_type;
+	unit_data.vid_std = str_ctx->vid_std;
+	unit_data.delim_present = str_ctx->grp_bstr_ctx.delim_present;
+	unit_data.codec_config = &str_ctx->codec_config;
+	unit_data.parse_state = parse_state;
+	unit_data.pict_sequ_hdr_id = str_ctx->sequ_hdr_id;
+	unit_data.str_res_handle = &str_ctx->str_alloc;
+	unit_data.unit_data_size = str_ctx->grp_bstr_ctx.total_data_size;
+	unit_data.intra_frm_as_closed_gop = str_ctx->intra_frame_closed_gop;
+
+	/* Point to the picture headers, checking view-index boundaries. */
+	curr_pict_hdr_info = vidx < VDEC_H264_MVC_MAX_VIEWS ?
+		&pict_ctx->pict_hdr_info[vidx] : NULL;
+	unit_data.parse_state->next_pict_hdr_info =
+		vidx + 1 < VDEC_H264_MVC_MAX_VIEWS ?
+		&pict_ctx->pict_hdr_info[vidx + 1] : NULL;
+	unit_data.parse_state->is_prefix = 0;
+
+	/* Obtain output data containers. */
+	result = bspp_get_resource(str_ctx, curr_pict_hdr_info, &unit_data);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Process Unit and catch any exceptions. */
+	/*
+	 * Call the standard-specific function to parse the bitstream
+	 * unit.
+	 */
+	result = str_ctx->parser_callbacks.parse_unit_cb(str_ctx->swsr_ctx.swsr_context,
+			&unit_data);
+	if (result != IMG_SUCCESS) {
+		pr_err("Failed to process unit, error = %d", unit_data.parse_error);
+		goto error;
+	}
+
+	if (unit_data.parse_error != BSPP_ERROR_NONE)
+		pr_err("Issues found while processing unit, error = %d\n", unit_data.parse_error);
+
+	/* Store or return resource used for parsing unit. */
+	result = bspp_file_resource(str_ctx, &unit_data);
+
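+	/*
+	 * Closed-GOP heuristic: for standards other than H.264, the first
+	 * intra-coded slice is treated as the start of a closed GOP, which
+	 * adds resilience when seeking.
+	 */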
+	if (!str_ctx->inter_pict_data.seen_closed_gop &&
+	    str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_PICTURE &&
+	    unit_data.slice && unit_data.out.pict_hdr_info &&
+	    unit_data.out.pict_hdr_info->intra_coded &&
+	    str_ctx->vid_std != VDEC_STD_H264)
+		unit_data.new_closed_gop = 1;
+
+	if (unit_data.new_closed_gop) {
+		str_ctx->inter_pict_data.seen_closed_gop = 1;
+		str_ctx->inter_pict_data.new_closed_gop = 1;
+	}
+
+	/*
+	 * Post-process the unit (use the local context in case the parse
+	 * function tried to change the unit type).
+	 */
+	if (str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_PICTURE ||
+	    str_ctx->grp_bstr_ctx.unit_type == BSPP_UNIT_SKIP_PICTURE) {
+		if (str_ctx->inter_pict_data.new_closed_gop) {
+			pict_ctx->closed_gop = 1;
+			str_ctx->inter_pict_data.new_closed_gop = 0;
+		}
+
+		if (unit_data.ext_slice && str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet &&
+		    unit_data.pict_sequ_hdr_id != BSPP_INVALID) {
+			unsigned int id = unit_data.pict_sequ_hdr_id;
+
+			str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet = 0;
+			pict_ctx->ext_sequ_hdr_info =
+				lst_last(&str_ctx->str_alloc.sequence_data_list[id]);
+		}
+
+		if (unit_data.slice) {
+			if (!curr_pict_hdr_info) {
+				VDEC_ASSERT(0);
+				return -EINVAL;
+			}
+			if (str_ctx->grp_bstr_ctx.not_pic_unit_yet &&
+			    unit_data.pict_sequ_hdr_id != BSPP_INVALID) {
+				str_ctx->grp_bstr_ctx.not_pic_unit_yet = 0;
+
+				/*
+				 * depend upon the picture header being
+				 * populated (in addition to slice data).
+				 */
+				pict_ctx->present = 1;
+
+				/*
+				 * Update the picture context from the last unit parsed.
+				 * This context must be stored since a non-picture unit may follow.
+				 * Obtain current instance of sequence data for given ID.
+				 */
+				if (!pict_ctx->sequ_hdr_info) {
+					unsigned int id = unit_data.pict_sequ_hdr_id;
+
+					pict_ctx->sequ_hdr_info =
+					lst_last(&str_ctx->str_alloc.sequence_data_list[id]);
+
+					/* Do the sequence flagging/reference-counting */
+					pict_ctx->sequ_hdr_info->ref_count++;
+				}
+
+				/* Override the field here. */
+				if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_NONE) {
+					if (str_ctx->grp_bstr_ctx.unit_type ==
+						BSPP_UNIT_SKIP_PICTURE) {
+						/* VDECFW_SKIPPED_PICTURE; */
+						curr_pict_hdr_info->parser_mode =
+							VDECFW_SKIPPED_PICTURE;
+						curr_pict_hdr_info->pic_data_size = 0;
+					} else {
+						/* VDECFW_SIZE_SIDEBAND; */
+						curr_pict_hdr_info->parser_mode =
+							VDECFW_SIZE_SIDEBAND;
+						curr_pict_hdr_info->pic_data_size =
+							str_ctx->grp_bstr_ctx.total_data_size;
+					}
+				} else if (str_ctx->swsr_ctx.sr_config.delim_type ==
+					   SWSR_DELIM_SIZE) {
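+					/*
+					 * Map the size-delimiter length to the
+					 * matching firmware parser mode:
+					 * <=8 bits -> 1 byte, <=16 -> 2 bytes,
+					 * <=32 -> 4 bytes.
+					 */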
+					if (str_ctx->swsr_ctx.sr_config.delim_length <= 8)
+						/* VDECFW_SIZE_DELIMITED_1_ONLY; */
+						curr_pict_hdr_info->parser_mode =
+							VDECFW_SIZE_DELIMITED_1_ONLY;
+					else if (str_ctx->swsr_ctx.sr_config.delim_length <= 16)
+						/* VDECFW_SIZE_DELIMITED_2_ONLY; */
+						curr_pict_hdr_info->parser_mode =
+							VDECFW_SIZE_DELIMITED_2_ONLY;
+					else if (str_ctx->swsr_ctx.sr_config.delim_length <= 32)
+						/* VDECFW_SIZE_DELIMITED_4_ONLY; */
+						curr_pict_hdr_info->parser_mode =
+							VDECFW_SIZE_DELIMITED_4_ONLY;
+
+					curr_pict_hdr_info->pic_data_size +=
+						((unsigned int)unit_size
+						+ (size_delim_bits / 8));
+				} else if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_SCP)
+					/* VDECFW_SCP_ONLY; */
+					curr_pict_hdr_info->parser_mode = VDECFW_SCP_ONLY;
+			}
+
+			/*
+			 * for MVC, the Slice Extension should also have the
+			 * same ParserMode as the Base view.
+			 */
+			if (unit_data.parse_state->next_pict_hdr_info) {
+				unit_data.parse_state->next_pict_hdr_info->parser_mode =
+					curr_pict_hdr_info->parser_mode;
+			}
+
+			if (unit_data.parse_error & BSPP_ERROR_UNSUPPORTED) {
+				pict_ctx->invalid = 1;
+				pict_ctx->unsupported = 1;
+			} else if (!str_ctx->full_scan) {
+				/*
+				 * Only parse up to and including the first
+				 * valid video slice unless full scanning.
+				 */
+				pict_ctx->finished = 1;
+			}
+		}
+	}
+
+	if (unit_data.extracted_all_data) {
+		enum swsr_found found;
+
+		swsr_byte_align(str_ctx->swsr_ctx.swsr_context);
+
+		found = swsr_check_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+		if (found != SWSR_FOUND_DELIM && found != SWSR_FOUND_EOD) {
+			/*
+			 * We should already be at the next delimiter or EOD;
+			 * any bits left at the end of the unit may indicate
+			 * corrupted syntax or erroneous parsing. Parsing
+			 * continues regardless.
+			 */
+		}
+	}
+
+	return IMG_SUCCESS;
+
+error:
+	if (unit_data.unit_type == BSPP_UNIT_PICTURE ||
+	    unit_data.unit_type == BSPP_UNIT_SKIP_PICTURE)
+		pict_ctx->invalid = 1;
+
+	/*
+	 * Tidy-up resources.
+	 * Store or return resource used for parsing unit.
+	 */
+	bspp_file_resource(str_ctx, &unit_data);
+
+	return result;
+}
+
+/*
+ * @Function	bspp_terminate_buffer
+ *
+ */
+static int bspp_terminate_buffer(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+				 struct bspp_bitstream_buffer *buf)
+{
+	int result = -1;
+
+	/* Indicate that all the data in buffer should be added to segment. */
+	buf->bytes_read = buf->data_size;
+
+	result = bspp_create_segment(grp_btsr_ctx, buf);
+	if (result != IMG_SUCCESS)
+		return result;
+
+	/* Next segment will start at the beginning of the next buffer. */
+	grp_btsr_ctx->segment_offset = 0;
+
+	bspp_free_bitstream_elem(buf);
+
+	return result;
+}
+
+/*
+ * @Function	bspp_jump_to_next_view
+ *
+ */
+static int bspp_jump_to_next_view(struct bspp_grp_bstr_ctx *grp_btsr_ctx,
+				  struct bspp_preparsed_data *preparsed_data,
+				  struct bspp_parse_state *parse_state)
+{
+	struct bspp_bitstream_buffer *cur_buf;
+	int result;
+	unsigned int i;
+	unsigned char vidx;
+
+	if (!grp_btsr_ctx || !parse_state || !preparsed_data) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	vidx = grp_btsr_ctx->current_view_idx;
+
+	if (vidx >= VDEC_H264_MVC_MAX_VIEWS) {
+		result = IMG_ERROR_NOT_SUPPORTED;
+		goto error;
+	}
+
+	/* get current buffer */
+	cur_buf = (struct bspp_bitstream_buffer *)lst_first(&grp_btsr_ctx->in_flight_bufs);
+	if (!cur_buf) {
+		result = IMG_ERROR_CANCELLED;
+		goto error;
+	}
+
+	if (cur_buf->bufmap_id != parse_state->prev_buf_map_id) {
+		/*
+		 * If we moved to the next buffer while parsing the slice
+		 * header of the new view we have to reduce the size of
+		 * the last segment up to the beginning of the new view slice
+		 * and create a new segment from that point up to the end of
+		 * the buffer. The new segment should belong to the new view.
+		 * THIS ONLY WORKS IF THE SLICE HEADER DOES NOT SPAN MORE THAN
+		 * TWO BUFFERS. If we want to support the case where the
+		 * slice header of the new view spans multiple buffers, we
+		 * would either have to remove all the segments here up to
+		 * the point where we find the buffer we are looking for,
+		 * adjust the size of that segment and add the removed
+		 * segments to the next view list, or implement a mechanism
+		 * that peeks at the NAL unit type and delimits the next
+		 * view segment before parsing the first slice of the view.
+		 */
+		struct bspp_bitstr_seg *segment;
+
+		segment = lst_last(grp_btsr_ctx->segment_list);
+		if (segment && segment->bufmap_id == parse_state->prev_buf_map_id) {
+			struct bspp_bitstream_buffer prev_buf;
+
+			segment->data_size -= parse_state->prev_buf_data_size
+				- parse_state->prev_byte_offset_buf;
+			segment->bstr_seg_flag &= ~VDECDD_BSSEG_LASTINBUFF;
+
+			/*
+			 * Change the segment_offset value to the value it
+			 * would have had if we had delimited the segment
+			 * correctly beforehand.
+			 */
+			grp_btsr_ctx->segment_offset = parse_state->prev_byte_offset_buf;
+
+			/* set lists of segments to new view... */
+			for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+				grp_btsr_ctx->pre_pict_seg_list[i] =
+					&preparsed_data->ext_pictures_data[vidx].pre_pict_seg_list
+					[i];
+				grp_btsr_ctx->pict_seg_list[i] =
+					&preparsed_data->ext_pictures_data[vidx].pict_seg_list[i];
+
+				lst_init(grp_btsr_ctx->pre_pict_seg_list[i]);
+				lst_init(grp_btsr_ctx->pict_seg_list[i]);
+			}
+			/* and current segment list */
+			grp_btsr_ctx->segment_list = grp_btsr_ctx->pict_seg_list[0];
+
+			memset(&prev_buf, 0, sizeof(struct bspp_bitstream_buffer));
+			prev_buf.bufmap_id = segment->bufmap_id;
+			prev_buf.data_size = parse_state->prev_buf_data_size;
+			prev_buf.bytes_read = prev_buf.data_size;
+
+			/* Create the segment for the first part of the next view */
+			result = bspp_create_segment(grp_btsr_ctx, &prev_buf);
+			if (result != IMG_SUCCESS)
+				goto error;
+		} else {
+			result = IMG_ERROR_NOT_SUPPORTED;
+			goto error;
+		}
+	} else {
+		/*
+		 * The data just parsed belongs to the new view, so use the
+		 * previous byte offset.
+		 */
+		cur_buf->bytes_read = parse_state->prev_byte_offset_buf;
+
+		/* Create the segment for previous view */
+		result = bspp_create_segment(grp_btsr_ctx, cur_buf);
+		if (result != IMG_SUCCESS)
+			goto error;
+
+		/* set lists of segments to new view */
+		for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+			grp_btsr_ctx->pre_pict_seg_list[i] =
+				&preparsed_data->ext_pictures_data[vidx].pre_pict_seg_list[i];
+			grp_btsr_ctx->pict_seg_list[i] =
+				&preparsed_data->ext_pictures_data[vidx].pict_seg_list[i];
+
+			lst_init(grp_btsr_ctx->pre_pict_seg_list[i]);
+			lst_init(grp_btsr_ctx->pict_seg_list[i]);
+		}
+		/* and current segment list */
+		grp_btsr_ctx->segment_list = grp_btsr_ctx->pict_seg_list[0];
+	}
+
+	/* update prefix flag */
+	preparsed_data->ext_pictures_data[vidx].is_prefix = parse_state->is_prefix;
+	/* and view index */
+	grp_btsr_ctx->current_view_idx++;
+
+	/* set number of extended pictures */
+	preparsed_data->num_ext_pictures = grp_btsr_ctx->current_view_idx;
+
+error:
+	return result;
+}
+
+static void bspp_reset_pict_state(struct bspp_str_context *str_ctx, struct bspp_pict_ctx *pict_ctx,
+				  struct bspp_parse_state *parse_state)
+{
+	memset(pict_ctx, 0, sizeof(struct bspp_pict_ctx));
+	memset(parse_state, 0, sizeof(struct bspp_parse_state));
+
+	/* Setup group buffer processing state. */
+	parse_state->inter_pict_ctx = &str_ctx->inter_pict_data;
+	parse_state->prev_bottom_pic_flag = (unsigned char)BSPP_INVALID;
+	parse_state->next_pic_is_new = 1;
+	parse_state->prev_frame_num = BSPP_INVALID;
+	parse_state->second_field_flag = 0;
+	parse_state->first_chunk = 1;
+}
+
+/*
+ * @Function	bspp_stream_preparse_buffers
+ * @Description	The buffer list cannot be processed immediately, since units
+ * in the last buffer may be incomplete. We must wait until a buffer is
+ * provided with end-of-picture signalled. When a buffer indicates that units
+ * will not span beyond it, the bitstream buffer chain can be processed.
+ */
+int bspp_stream_preparse_buffers(void *str_context_handle,
+				 const struct bspp_ddbuf_info *contig_buf_info,
+				 unsigned int contig_buf_map_id, struct lst_t *segments,
+				 struct bspp_preparsed_data *preparsed_data,
+				 int end_of_pic)
+{
+	struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+	struct bspp_pict_ctx *pict_ctx = &str_ctx->pict_ctx;
+	struct bspp_parse_state *parse_state = &str_ctx->parse_state;
+	int i;
+	unsigned int unit_count = 0, num_arrays = 0;
+	unsigned int size_delim_bits = 0;
+	enum swsr_found found = SWSR_FOUND_NONE;
+	unsigned int result;
+	struct bspp_bitstr_seg *segment;
+	struct lst_t temp_list;
+
+	/* Validate input arguments before touching the stream context. */
+	if (!str_context_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	if (!segments || !preparsed_data)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/*
+	 * Since this is a new picture, reset the context status to the
+	 * beginning.
+	 */
+	/* TODO: revisit this */
+	pict_ctx->finished = 0;
+	pict_ctx->new_pict_signalled = 0;
+
+	/* Check that bitstream buffers have been registered. */
+	if (!lst_last(&str_ctx->grp_bstr_ctx.buffer_chain))
+		return IMG_ERROR_OPERATION_PROHIBITED;
+
+	/* Initialise the output data. */
+	memset(preparsed_data, 0, sizeof(struct bspp_preparsed_data));
+
+	if (!parse_state->initialised) {
+		bspp_reset_pict_state(str_ctx, pict_ctx, parse_state);
+		parse_state->initialised = 1;
+	}
+
+	for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+		lst_init(&preparsed_data->picture_data.pre_pict_seg_list[i]);
+		lst_init(&preparsed_data->picture_data.pict_seg_list[i]);
+	}
+
+	/* Initialise parsing for this video standard. */
+	if (str_ctx->parser_callbacks.initialise_parsing_cb && parse_state->first_chunk)
+		str_ctx->parser_callbacks.initialise_parsing_cb(parse_state);
+
+	parse_state->first_chunk = 0;
+
+	for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS; i++) {
+		pict_ctx->pict_hdr_info[i].pict_aux_data.id = BSPP_INVALID;
+		pict_ctx->pict_hdr_info[i].second_pict_aux_data.id = BSPP_INVALID;
+	}
+
+	/* Setup buffer group bitstream context. */
+	str_ctx->grp_bstr_ctx.vid_std = str_ctx->vid_std;
+	str_ctx->grp_bstr_ctx.disable_mvc = str_ctx->disable_mvc;
+	str_ctx->grp_bstr_ctx.delim_present = 1;
+	str_ctx->grp_bstr_ctx.swsr_context = str_ctx->swsr_ctx.swsr_context;
+	str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_NONE;
+	str_ctx->grp_bstr_ctx.last_unit_type = BSPP_UNIT_NONE;
+	str_ctx->grp_bstr_ctx.not_pic_unit_yet = 1;
+	str_ctx->grp_bstr_ctx.not_ext_pic_unit_yet = 1;
+	str_ctx->grp_bstr_ctx.total_bytes_read = 0;
+	str_ctx->grp_bstr_ctx.current_view_idx = 0;
+
+	for (i = 0; i < BSPP_MAX_PICTURES_PER_BUFFER; i++) {
+		str_ctx->grp_bstr_ctx.pre_pict_seg_list[i] =
+			&preparsed_data->picture_data.pre_pict_seg_list[i];
+		str_ctx->grp_bstr_ctx.pict_seg_list[i] =
+			&preparsed_data->picture_data.pict_seg_list[i];
+		str_ctx->grp_bstr_ctx.pict_tag_param_array[i] =
+			&preparsed_data->picture_data.pict_tag_param[i];
+	}
+	str_ctx->grp_bstr_ctx.segment_list = str_ctx->grp_bstr_ctx.pre_pict_seg_list[0];
+	str_ctx->grp_bstr_ctx.pict_tag_param = str_ctx->grp_bstr_ctx.pict_tag_param_array[0];
+	str_ctx->grp_bstr_ctx.free_segments = segments;
+	str_ctx->grp_bstr_ctx.segment_offset = 0;
+	str_ctx->grp_bstr_ctx.insert_start_code = 0;
+
+	/*
+	 * Before processing the units, service all the picture decoded
+	 * events to free their resources.
+	 */
+	bspp_service_pictures_decoded(str_ctx);
+
+	/*
+	 * A picture currently being parsed may already have been decoded
+	 * (this can happen after a DWR in low-latency mode) and its
+	 * resources freed. Skip the rest of the picture.
+	 */
+	if (pict_ctx->sequ_hdr_info && pict_ctx->sequ_hdr_info->ref_count == 0) {
+		pict_ctx->present = 0;
+		pict_ctx->finished = 1;
+	}
+
+	/*
+	 * For bitstreams without unit delimiters treat all the buffers as
+	 * a single unit whose type is defined by the first buffer element.
+	 */
+	if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_NONE) {
+		struct bspp_bitstream_buffer *cur_buf =
+			lst_first(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+		/* If there is no picture data, the picture must be skipped. */
+		if (!cur_buf || cur_buf->data_size == 0) {
+			str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_SKIP_PICTURE;
+		} else if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_CODEC_CONFIG) {
+			str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_SEQUENCE;
+		} else if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_PICTURE_DATA ||
+			 cur_buf->bstr_element_type == VDEC_BSTRELEMENT_UNSPECIFIED) {
+			str_ctx->grp_bstr_ctx.unit_type = BSPP_UNIT_PICTURE;
+			str_ctx->grp_bstr_ctx.segment_list = str_ctx->grp_bstr_ctx.pict_seg_list[0];
+		}
+
+		str_ctx->grp_bstr_ctx.delim_present = 0;
+	}
+
+	/*
+	 * Load the first section (buffer) of bitstream into the software
+	 * shift-register. BSPP maps "buffer" to "section" and allows for
+	 * contiguous parsing of all buffers, since unit boundaries are not
+	 * known up-front. Unit parsing and segment creation happen in a
+	 * single pass.
+	 */
+	result = swsr_start_bitstream(str_ctx->swsr_ctx.swsr_context,
+				      &str_ctx->swsr_ctx.sr_config,
+				      str_ctx->grp_bstr_ctx.total_data_size,
+				      str_ctx->swsr_ctx.emulation_prevention);
+	if (result != IMG_SUCCESS)
+		goto error;
+
+	/* Seek for next delimiter or end of data and catch any exceptions. */
+	if (str_ctx->grp_bstr_ctx.delim_present) {
+		/* Locate the first bitstream unit. */
+		found = swsr_seek_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+	}
+
+	if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_SIZE) {
+		struct bspp_bitstream_buffer *cur_buf =
+			lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+		if (!cur_buf) {
+			result = IMG_ERROR_INVALID_PARAMETERS;
+			goto error;
+		}
+
+		if (cur_buf->bstr_element_type == VDEC_BSTRELEMENT_CODEC_CONFIG &&
+		    str_ctx->parser_callbacks.parse_codec_config_cb) {
+			/* Parse codec config header and catch any exceptions */
+			str_ctx->parser_callbacks.parse_codec_config_cb
+						(str_ctx->swsr_ctx.swsr_context,
+						 &unit_count,
+						 &num_arrays,
+						 &str_ctx->swsr_ctx.sr_config.delim_length,
+						 &size_delim_bits);
+		} else {
+			size_delim_bits = str_ctx->swsr_ctx.sr_config.delim_length;
+		}
+	}
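+	/*
+	 * size_delim_bits is the width of the size field preceding each unit
+	 * in size-delimited streams; bspp_process_unit() uses it when
+	 * consuming unit delimiters.
+	 */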
+
+	/* Process all the bitstream units until the picture is located. */
+	while (found != SWSR_FOUND_EOD && !pict_ctx->finished) {
+		struct bspp_bitstream_buffer *cur_buf =
+			lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+		if (!cur_buf) {
+			pr_err("%s: cur_buf pointer is NULL\n", __func__);
+			result = IMG_ERROR_INVALID_PARAMETERS;
+			goto error;
+		}
+
+		if (str_ctx->swsr_ctx.sr_config.delim_type == SWSR_DELIM_SIZE &&
+		    cur_buf->bstr_element_type == VDEC_BSTRELEMENT_CODEC_CONFIG &&
+		    str_ctx->parser_callbacks.update_unit_counts_cb) {
+			/*
+			 * Parse middle part of codec config header and catch
+			 * any exceptions.
+			 */
+			str_ctx->parser_callbacks.update_unit_counts_cb
+						(str_ctx->swsr_ctx.swsr_context,
+						 &unit_count,
+						 &num_arrays);
+		}
+
+		/* Process the next unit. */
+		result = bspp_process_unit(str_ctx, size_delim_bits, pict_ctx, parse_state);
+		if (result == IMG_ERROR_NOT_SUPPORTED)
+			goto error;
+
+		if (str_ctx->swsr_ctx.sr_config.delim_type != SWSR_DELIM_NONE)
+			str_ctx->grp_bstr_ctx.delim_present = 1;
+
+		/* jump to the next view */
+		if (parse_state->new_view) {
+			result = bspp_jump_to_next_view(&str_ctx->grp_bstr_ctx,
+							preparsed_data,
+							parse_state);
+			if (result != IMG_SUCCESS)
+				goto error;
+
+			parse_state->new_view = 0;
+		}
+
+		if (!pict_ctx->finished) {
+			/*
+			 * Seek for next delimiter or end of data and catch any
+			 * exceptions.
+			 */
+			/* Locate the next bitstream unit or end of data */
+			found = swsr_seek_delim_or_eod(str_ctx->swsr_ctx.swsr_context);
+
+			{
+				struct bspp_bitstream_buffer *buf;
+				/* Update the offset within current buffer. */
+				swsr_get_byte_offset_curbuf(str_ctx->grp_bstr_ctx.swsr_context,
+							    &parse_state->prev_byte_offset_buf);
+				buf = lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+				if (buf) {
+					parse_state->prev_buf_map_id = buf->bufmap_id;
+					parse_state->prev_buf_data_size = buf->data_size;
+				}
+			}
+		}
+	}
+
+	/* Finalize parsing for this video standard. */
+	if (str_ctx->parser_callbacks.finalise_parsing_cb && end_of_pic) {
+		str_ctx->parser_callbacks.finalise_parsing_cb((void *)&str_ctx->str_alloc,
+			parse_state);
+	}
+
+	/*
+	 * Create segments for each buffer held by the software shift register
+	 * (and not yet processed).
+	 */
+	while (lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs)) {
+		struct bspp_bitstream_buffer *buf =
+			lst_removehead(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+		result = bspp_terminate_buffer(&str_ctx->grp_bstr_ctx, buf);
+	}
+
+	/*
+	 * Create segments for each buffer not yet requested by the shift
+	 * register.
+	 */
+	while (lst_first(&str_ctx->grp_bstr_ctx.buffer_chain)) {
+		struct bspp_bitstream_buffer *buf =
+			lst_removehead(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+		result = bspp_terminate_buffer(&str_ctx->grp_bstr_ctx, buf);
+	}
+
+	/*
+	 * Populate the parsed data information for picture only if one is
+	 * present. The anonymous data has already been added to the
+	 * appropriate segment list.
+	 */
+	if (pict_ctx->present && !pict_ctx->invalid) {
+		if (!pict_ctx->new_pict_signalled) {
+			/*
+			 * Provide data about the sequence used by the picture.
+			 * Signal "new sequence" if the sequence header is new
+			 * or has changed. Always switch sequences when changing
+			 * between base and additional views.
+			 */
+			if (pict_ctx->sequ_hdr_info) {
+				if (pict_ctx->sequ_hdr_info->sequ_hdr_info.sequ_hdr_id !=
+					str_ctx->sequ_hdr_id ||
+					pict_ctx->sequ_hdr_info->ref_count == 1 ||
+					pict_ctx->ext_sequ_hdr_info ||
+					pict_ctx->closed_gop) {
+					preparsed_data->new_sequence = 1;
+					preparsed_data->sequ_hdr_info =
+						pict_ctx->sequ_hdr_info->sequ_hdr_info;
+				}
+			}
+
+			/* Signal "new subsequence" and its common header information. */
+			if (pict_ctx->ext_sequ_hdr_info) {
+				preparsed_data->new_sub_sequence = 1;
+				preparsed_data->ext_sequ_hdr_info =
+					pict_ctx->ext_sequ_hdr_info->sequ_hdr_info;
+
+				for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS - 1;
+					i++) {
+					/*
+					 * prefix is always the last one
+					 * do not attach any header info to it
+					 */
+					if (preparsed_data->ext_pictures_data[i].is_prefix)
+						break;
+
+					/* attach headers */
+					preparsed_data->ext_pictures_data[i].sequ_hdr_id =
+					pict_ctx->ext_sequ_hdr_info->sequ_hdr_info.sequ_hdr_id;
+					pict_ctx->ext_sequ_hdr_info->ref_count++;
+					preparsed_data->ext_pictures_data[i].pict_hdr_info =
+						pict_ctx->pict_hdr_info[i + 1];
+				}
+
+				preparsed_data->ext_pictures_data
+					[0].pict_hdr_info.first_pic_of_sequence =
+					preparsed_data->new_sub_sequence;
+
+				/*
+				 * Update the base view common sequence info
+				 * with the number of views that the stream has.
+				 * Otherwise the number of views is inconsistent
+				 * between base view sequence and dependent view
+				 * sequences. Also base view sequence appears
+				 * with one view and the driver calculates the
+				 * wrong number of resources.
+				 */
+				preparsed_data->sequ_hdr_info.com_sequ_hdr_info.num_views =
+				preparsed_data->ext_sequ_hdr_info.com_sequ_hdr_info.num_views;
+			}
+
+			/* Signal if this picture is the first in a closed GOP */
+			if (pict_ctx->closed_gop) {
+				preparsed_data->closed_gop = 1;
+				preparsed_data->sequ_hdr_info.com_sequ_hdr_info.not_dpb_flush =
+					str_ctx->inter_pict_data.not_dpb_flush;
+			}
+
+			/*
+			 * Signal "new picture" and its common header
+			 * information.
+			 */
+			preparsed_data->new_picture = 1;
+			if (pict_ctx->sequ_hdr_info) {
+				preparsed_data->picture_data.sequ_hdr_id =
+					pict_ctx->sequ_hdr_info->sequ_hdr_info.sequ_hdr_id;
+			}
+			preparsed_data->picture_data.pict_hdr_info = pict_ctx->pict_hdr_info[0];
+
+			preparsed_data->picture_data.pict_hdr_info.first_pic_of_sequence =
+				preparsed_data->new_sequence;
+			if (contig_buf_info)
+				preparsed_data->picture_data.pict_hdr_info.fragmented_data = 1;
+			else
+				preparsed_data->picture_data.pict_hdr_info.fragmented_data = 0;
+
+			str_ctx->sequ_hdr_id = preparsed_data->picture_data.sequ_hdr_id;
+
+			pict_ctx->new_pict_signalled = 1;
+
+			/*
+			 * ASO/FMO are supported only when a frame is
+			 * submitted as a whole.
+			 */
+			if (parse_state->discontinuous_mb && !end_of_pic)
+				result = IMG_ERROR_NOT_SUPPORTED;
+		} else {
+			preparsed_data->new_fragment = 1;
+
+			if (parse_state->discontinuous_mb)
+				result = IMG_ERROR_NOT_SUPPORTED;
+		}
+
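+		/*
+		 * Reorder the picture segment list so that any prefix
+		 * segments carried over from the previous call (e.g. MVC
+		 * prefix NAL units) come before the current picture's
+		 * segments.
+		 */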
+		lst_init(&temp_list);
+
+		segment = lst_removehead(&preparsed_data->picture_data.pict_seg_list[0]);
+		while (segment) {
+			lst_add(&temp_list, segment);
+			segment = lst_removehead(&preparsed_data->picture_data.pict_seg_list[0]);
+		}
+
+		segment = lst_removehead(&str_ctx->inter_pict_data.pic_prefix_seg);
+		while (segment) {
+			lst_add(&preparsed_data->picture_data.pict_seg_list[0],
+				segment);
+			segment = lst_removehead(&str_ctx->inter_pict_data.pic_prefix_seg);
+		}
+
+		segment = lst_removehead(&temp_list);
+		while (segment) {
+			lst_add(&preparsed_data->picture_data.pict_seg_list[0],
+				segment);
+			segment = lst_removehead(&temp_list);
+		}
+
+		for (i = 0; i < VDEC_H264_MVC_MAX_VIEWS; i++) {
+			unsigned int j;
+			struct bspp_picture_data *ext_pic_data =
+				&preparsed_data->ext_pictures_data[i];
+
+			if (preparsed_data->ext_pictures_data[i].is_prefix) {
+				for (j = 0; j < BSPP_MAX_PICTURES_PER_BUFFER;
+					j++) {
+					segment = lst_removehead(&ext_pic_data->pict_seg_list[j]);
+					while (segment) {
+						lst_add(&str_ctx->inter_pict_data.pic_prefix_seg,
+							segment);
+						segment = lst_removehead
+								(&ext_pic_data->pict_seg_list[j]);
+					}
+				}
+				preparsed_data->num_ext_pictures--;
+				break;
+			}
+		}
+	} else if (pict_ctx->present && pict_ctx->sequ_hdr_info) {
+		/*
+		 * Reduce the reference count since this picture will not be
+		 * decoded.
+		 */
+		pict_ctx->sequ_hdr_info->ref_count--;
+		/* Release sequence data. */
+		if (str_ctx->parser_callbacks.release_data_cb) {
+			str_ctx->parser_callbacks.release_data_cb((void *)&str_ctx->str_alloc,
+				BSPP_UNIT_SEQUENCE,
+				pict_ctx->sequ_hdr_info->secure_sequence_info);
+		}
+	}
+
+	/* Reset the group bitstream context */
+	memset(&str_ctx->grp_bstr_ctx, 0, sizeof(str_ctx->grp_bstr_ctx));
+	lst_init(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+	/*
+	 * for now: return IMG_ERROR_NOT_SUPPORTED only if explicitly set by
+	 * parser
+	 */
+	result = (result == IMG_ERROR_NOT_SUPPORTED) ?
+		IMG_ERROR_NOT_SUPPORTED : IMG_SUCCESS;
+
+	if (end_of_pic)
+		parse_state->initialised = 0;
+
+	return result;
+
+error:
+	/* Free the SWSR list of buffers */
+	while (lst_first(&str_ctx->grp_bstr_ctx.in_flight_bufs))
+		lst_removehead(&str_ctx->grp_bstr_ctx.in_flight_bufs);
+
+	return result;
+}
+
+/*
+ * @Function	bspp_stream_destroy
+ *
+ */
+int bspp_stream_destroy(void *str_context_handle)
+{
+	struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+	unsigned int i;
+	unsigned int sps_id;
+	unsigned int pps_id;
+	struct bspp_sequence_hdr_info *sequ_hdr_info;
+	struct bspp_pps_info *pps_info;
+	unsigned int result;
+
+	/* Validate input arguments. */
+	if (!str_context_handle) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	swsr_deinitialise(str_ctx->swsr_ctx.swsr_context);
+
+	/*
+	 * Service all the picture decoded events and free any unused
+	 * resources.
+	 */
+	bspp_service_pictures_decoded(str_ctx);
+	for (sps_id = 0; sps_id < SEQUENCE_SLOTS; sps_id++)
+		bspp_remove_unused_sequence(str_ctx, sps_id);
+
+	if (str_ctx->vid_std_features.uses_pps) {
+		for (pps_id = 0; pps_id < PPS_SLOTS; pps_id++)
+			bspp_remove_unused_pps(str_ctx, pps_id);
+	}
+
+	if (str_ctx->vid_std_features.uses_vps) {
+		struct bspp_vps_info *vps_info;
+
+		for (i = 0; i < VPS_SLOTS; ++i) {
+			vps_info = lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+
+			if (vps_info)
+				lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+
+			/*
+			 * When we are done with the stream we should have at
+			 * most one VPS per slot, so after removing this one we
+			 * should have none. In case of "decoded frames" this is
+			 * not true, because we send more pictures for decode
+			 * than we expect to receive back, which means that
+			 * additional sequences/PPSs may be in the list.
+			 */
+			vps_info = lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+			if (vps_info) {
+				do {
+					lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+					vps_info =
+					lst_removehead(&str_ctx->str_alloc.vps_data_list[i]);
+				} while (vps_info);
+			}
+			VDEC_ASSERT(lst_empty(&str_ctx->str_alloc.vps_data_list[i]));
+		}
+
+		vps_info = NULL;
+		for (i = 0; i < MAX_VPSS; ++i) {
+			VDEC_ASSERT(!lst_empty(&str_ctx->str_alloc.available_vps_list));
+			vps_info = lst_removehead(&str_ctx->str_alloc.available_vps_list);
+			if (vps_info) {
+				kfree(vps_info->secure_vpsinfo);
+				kfree(vps_info);
+			} else {
+				VDEC_ASSERT(vps_info);
+				pr_err("vps still active at shutdown\n");
+			}
+		}
+		VDEC_ASSERT(lst_empty(&str_ctx->str_alloc.available_vps_list));
+	}
+
+	/* Free the memory required for this stream. */
+	for (i = 0; i < SEQUENCE_SLOTS; i++) {
+		sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+		if (sequ_hdr_info) {
+			if (str_ctx->parser_callbacks.release_data_cb)
+				str_ctx->parser_callbacks.release_data_cb
+					((void *)&str_ctx->str_alloc,
+					BSPP_UNIT_SEQUENCE,
+					sequ_hdr_info->secure_sequence_info);
+			lst_add(&str_ctx->str_alloc.available_sequence_list,
+				sequ_hdr_info);
+		}
+
+		/*
+		 * When we are done with the stream we should have at most one
+		 * sequence per slot, so after removing this one we should
+		 * have none. In case of "decoded frames" this is not true,
+		 * because we send more pictures for decode than we expect to
+		 * receive back, which means that additional sequences/PPSs
+		 * may be in the list.
+		 */
+		sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+		if (sequ_hdr_info) {
+			unsigned int count_extra_sequences = 0;
+
+			do {
+				count_extra_sequences++;
+				if (str_ctx->parser_callbacks.release_data_cb) {
+					str_ctx->parser_callbacks.release_data_cb
+						((void *)&str_ctx->str_alloc,
+						 BSPP_UNIT_SEQUENCE,
+						 sequ_hdr_info->secure_sequence_info);
+				}
+				lst_add(&str_ctx->str_alloc.available_sequence_list,
+					sequ_hdr_info);
+				sequ_hdr_info =
+					lst_removehead(&str_ctx->str_alloc.sequence_data_list[i]);
+			} while (sequ_hdr_info);
+		}
+	}
+
+	if (str_ctx->vid_std_features.uses_pps) {
+		for (i = 0; i < PPS_SLOTS; i++) {
+			pps_info = lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+			if (pps_info)
+				lst_add(&str_ctx->str_alloc.available_ppss_list, pps_info);
+
+			/*
+			 * When we are done with the stream we should have at
+			 * most one PPS per slot, so after removing this one
+			 * we should have none. In case of "decoded frames"
+			 * this is not true, because we send more pictures for
+			 * decode than we expect to receive back, which means
+			 * that additional sequences/PPSs may be in the list.
+			 */
+			pps_info = lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+			if (pps_info) {
+				unsigned int count_extra_ppss = 0;
+
+				do {
+					count_extra_ppss++;
+					lst_add(&str_ctx->str_alloc.available_ppss_list,
+						pps_info);
+					pps_info =
+					lst_removehead(&str_ctx->str_alloc.pps_data_list[i]);
+				} while (pps_info);
+			}
+		}
+	}
+
+	for (i = 0; i < MAX_SEQUENCES; i++) {
+		sequ_hdr_info = lst_removehead(&str_ctx->str_alloc.available_sequence_list);
+		if (sequ_hdr_info && str_ctx->parser_callbacks.destroy_data_cb)
+			str_ctx->parser_callbacks.destroy_data_cb
+				(BSPP_UNIT_SEQUENCE, sequ_hdr_info->secure_sequence_info);
+	}
+
+	kfree(str_ctx->secure_sequence_info);
+	str_ctx->secure_sequence_info = NULL;
+	kfree(str_ctx->sequ_hdr_info);
+	str_ctx->sequ_hdr_info = NULL;
+
+	if (str_ctx->vid_std_features.uses_pps) {
+		for (i = 0; i < MAX_PPSS; i++) {
+			pps_info = lst_removehead(&str_ctx->str_alloc.available_ppss_list);
+			if (pps_info && str_ctx->parser_callbacks.destroy_data_cb)
+				str_ctx->parser_callbacks.destroy_data_cb
+							(BSPP_UNIT_PPS, pps_info->secure_pps_info);
+		}
+
+		kfree(str_ctx->secure_pps_info);
+		str_ctx->secure_pps_info = NULL;
+		kfree(str_ctx->pps_info);
+		str_ctx->pps_info = NULL;
+	}
+
+	/* destroy mutex */
+	mutex_destroy(str_ctx->bspp_mutex);
+	kfree(str_ctx->bspp_mutex);
+	str_ctx->bspp_mutex = NULL;
+
+	kfree(str_ctx);
+
+	return IMG_SUCCESS;
+error:
+	return result;
+}
+
+/*
+ * @Function	bspp_set_codec_config
+ *
+ */
+int bspp_set_codec_config(const void *str_context_handle,
+			  const struct vdec_codec_config *codec_config)
+{
+	struct bspp_str_context *str_ctx = (struct bspp_str_context *)str_context_handle;
+	unsigned int result = IMG_SUCCESS;
+
+	/* Validate input arguments. */
+	if (!str_context_handle || !codec_config) {
+		result = IMG_ERROR_INVALID_PARAMETERS;
+		goto error;
+	}
+
+	switch (str_ctx->vid_std) {
+	default:
+		result = IMG_ERROR_NOT_SUPPORTED;
+		break;
+	}
+error:
+	return result;
+}
+
+/*
+ * @Function	bspp_stream_create
+ *
+ */
+int bspp_stream_create(const struct vdec_str_configdata *str_config_data,
+		       void **str_ctx_handle,
+		       struct bspp_ddbuf_array_info fw_sequence[],
+		       struct bspp_ddbuf_array_info fw_pps[])
+{
+	struct bspp_str_context *str_ctx;
+	unsigned int result = IMG_SUCCESS;
+	unsigned int i;
+	struct bspp_sequence_hdr_info *sequ_hdr_info;
+	struct bspp_pps_info *pps_info;
+	struct bspp_parse_state *parse_state;
+
+	/* Allocate a zero-initialised stream structure */
+	str_ctx = kzalloc(sizeof(*str_ctx), GFP_KERNEL);
+	if (!str_ctx) {
+		result = IMG_ERROR_OUT_OF_MEMORY;
+		goto error;
+	}
+
+	/* Initialise the stream context structure. */
+	str_ctx->sequ_hdr_id = BSPP_INVALID;
+	str_ctx->vid_std = str_config_data->vid_std;
+	str_ctx->bstr_format = str_config_data->bstr_format;
+	str_ctx->disable_mvc = str_config_data->disable_mvc;
+	str_ctx->full_scan = str_config_data->full_scan;
+	str_ctx->immediate_decode = str_config_data->immediate_decode;
+	str_ctx->intra_frame_closed_gop = str_config_data->intra_frame_closed_gop;
+
+	parse_state = &str_ctx->parse_state;
+
+	/* Setup group buffer processing state. */
+	parse_state->inter_pict_ctx = &str_ctx->inter_pict_data;
+	parse_state->prev_bottom_pic_flag = (unsigned char)BSPP_INVALID;
+	parse_state->next_pic_is_new = 1;
+	parse_state->prev_frame_num = BSPP_INVALID;
+	parse_state->second_field_flag = 0;
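+	/*
+	 * Note: the parse-state setup above mirrors bspp_reset_pict_state();
+	 * keep the two in sync.
+	 */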
+
+	lst_init(&str_ctx->grp_bstr_ctx.buffer_chain);
+
+	if (str_ctx->vid_std < VDEC_STD_MAX && parser_fxns[str_ctx->vid_std].set_parser_config) {
+		parser_fxns[str_ctx->vid_std].set_parser_config(str_ctx->bstr_format,
+			&str_ctx->vid_std_features,
+			&str_ctx->swsr_ctx,
+			&str_ctx->parser_callbacks,
+			&str_ctx->inter_pict_data);
+	} else {
+		result = IMG_ERROR_NOT_SUPPORTED;
+		goto error;
+	}
+
+	/* Allocate the memory required for this stream for Sequence/PPS info */
+	lst_init(&str_ctx->str_alloc.available_sequence_list);
+
+	str_ctx->sequ_hdr_info = kcalloc(MAX_SEQUENCES, sizeof(struct bspp_sequence_hdr_info),
+					 GFP_KERNEL);
+	if (!str_ctx->sequ_hdr_info) {
+		result = IMG_ERROR_OUT_OF_MEMORY;
+		goto error;
+	}
+
+	str_ctx->secure_sequence_info =
+		kcalloc(MAX_SEQUENCES, str_ctx->vid_std_features.seq_size,
+			GFP_KERNEL);
+	if (!str_ctx->secure_sequence_info) {
+		result = IMG_ERROR_OUT_OF_MEMORY;
+		goto error;
+	}
+
+	sequ_hdr_info = (struct bspp_sequence_hdr_info *)(str_ctx->sequ_hdr_info);
+	for (i = 0; i < MAX_SEQUENCES; i++) {
+		/* Deal with the device memory for FW SPS data */
+		sequ_hdr_info->fw_sequence = fw_sequence[i];
+		sequ_hdr_info->sequ_hdr_info.bufmap_id =
+			fw_sequence[i].ddbuf_info.bufmap_id;
+		sequ_hdr_info->sequ_hdr_info.buf_offset =
+			fw_sequence[i].buf_offset;
+		sequ_hdr_info->secure_sequence_info = (void *)(str_ctx->secure_sequence_info +
+			(i * str_ctx->vid_std_features.seq_size));
+
+		lst_add(&str_ctx->str_alloc.available_sequence_list,
+			sequ_hdr_info);
+		sequ_hdr_info++;
+	}
+
+	if (str_ctx->vid_std_features.uses_pps) {
+		lst_init(&str_ctx->str_alloc.available_ppss_list);
+		str_ctx->pps_info = kcalloc(MAX_PPSS, sizeof(struct bspp_pps_info), GFP_KERNEL);
+		if (!str_ctx->pps_info) {
+			result = IMG_ERROR_OUT_OF_MEMORY;
+			goto error;
+		}
+		str_ctx->secure_pps_info = kcalloc(MAX_PPSS, str_ctx->vid_std_features.pps_size,
+						   GFP_KERNEL);
+		if (!str_ctx->secure_pps_info) {
+			result = IMG_ERROR_OUT_OF_MEMORY;
+			goto error;
+		}
+
+		pps_info = (struct bspp_pps_info *)(str_ctx->pps_info);
+		for (i = 0; i < MAX_PPSS; i++) {
+			/* Deal with the device memory for FW PPS data */
+			pps_info->fw_pps = fw_pps[i];
+			pps_info->bufmap_id = fw_pps[i].ddbuf_info.bufmap_id;
+			pps_info->buf_offset = fw_pps[i].buf_offset;
+
+			/*
+			 * We have no container for the PPS that passes down to the kernel,
+			 * for this reason the h264 secure parser needs to populate that
+			 * info into the picture header (Second)PictAuxData.
+			 */
+			pps_info->secure_pps_info = (void *)(str_ctx->secure_pps_info + (i *
+							str_ctx->vid_std_features.pps_size));
+
+			lst_add(&str_ctx->str_alloc.available_ppss_list, pps_info);
+			pps_info++;
+		}
+
+		/*
+		 * As only standards that use PPS also use VUI, initialise
+		 * the appropriate data structures here.
+		 * Initialise the list of raw bitstream data containers.
+		 */
+		lst_init(&str_ctx->str_alloc.raw_data_list_available);
+		lst_init(&str_ctx->str_alloc.raw_data_list_used);
+	}
+
+	if (str_ctx->vid_std_features.uses_vps) {
+		struct bspp_vps_info *vps_info;
+
+		lst_init(&str_ctx->str_alloc.available_vps_list);
+		for (i = 0; i < MAX_VPSS; ++i) {
+			vps_info = kzalloc(sizeof(*vps_info), GFP_KERNEL);
+			VDEC_ASSERT(vps_info);
+			if (!vps_info) {
+				result = IMG_ERROR_OUT_OF_MEMORY;
+				goto error;
+			}
+			/*
+			 * for VPS we do not allocate device memory since (at least for now)
+			 * there is no need to pass any data from VPS directly to FW
+			 */
+			/* Allocate memory for BSPP local VPS data structure. */
+			vps_info->secure_vpsinfo =
+				kzalloc(str_ctx->vid_std_features.vps_size, GFP_KERNEL);
+
+			VDEC_ASSERT(vps_info->secure_vpsinfo);
+			if (!vps_info->secure_vpsinfo) {
+				result = IMG_ERROR_OUT_OF_MEMORY;
+				goto error;
+			}
+
+			lst_add(&str_ctx->str_alloc.available_vps_list, vps_info);
+		}
+	}
+
+	/* ... and initialise the lists that will use this data */
+	for (i = 0; i < SEQUENCE_SLOTS; i++)
+		lst_init(&str_ctx->str_alloc.sequence_data_list[i]);
+
+	if (str_ctx->vid_std_features.uses_pps)
+		for (i = 0; i < PPS_SLOTS; i++)
+			lst_init(&str_ctx->str_alloc.pps_data_list[i]);
+
+	str_ctx->bspp_mutex = kzalloc(sizeof(*str_ctx->bspp_mutex), GFP_KERNEL);
+	if (!str_ctx->bspp_mutex) {
+		result = -ENOMEM;
+		goto error;
+	}
+	mutex_init(str_ctx->bspp_mutex);
+
+	/* Initialise the software shift-register */
+	swsr_initialise(bspp_exception_handler, &str_ctx->parse_ctx,
+			(swsr_callback_fxn) bspp_shift_reg_cb,
+			&str_ctx->grp_bstr_ctx,
+			&str_ctx->swsr_ctx.swsr_context);
+
+	/* Setup the parse context */
+	str_ctx->parse_ctx.swsr_context = str_ctx->swsr_ctx.swsr_context;
+
+	*str_ctx_handle = str_ctx;
+
+	return IMG_SUCCESS;
+
+error:
+	if (str_ctx) {
+		kfree(str_ctx->sequ_hdr_info);
+		kfree(str_ctx->secure_sequence_info);
+		kfree(str_ctx->pps_info);
+		kfree(str_ctx->secure_pps_info);
+		kfree(str_ctx);
+	}
+
+	return result;
+}
+
+void bspp_freeraw_sei_datacontainer(const void *str_res,
+				    struct vdec_raw_bstr_data *rawsei_datacontainer)
+{
+	struct bspp_raw_sei_alloc *rawsei_alloc = NULL;
+
+	/* Check input params. */
+	if (str_res && rawsei_datacontainer) {
+		struct bspp_stream_alloc_data *alloc_data =
+			(struct bspp_stream_alloc_data *)str_res;
+
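+		/*
+		 * Recover the enclosing bspp_raw_sei_alloc from its embedded
+		 * raw_sei_data member.
+		 */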
+		rawsei_alloc = container_of(rawsei_datacontainer,
+					    struct bspp_raw_sei_alloc,
+					    raw_sei_data);
+		memset(&rawsei_alloc->raw_sei_data, 0, sizeof(rawsei_alloc->raw_sei_data));
+		lst_remove(&alloc_data->raw_sei_alloc_list, rawsei_alloc);
+		kfree(rawsei_alloc);
+	}
+}
+
+void bspp_freeraw_sei_datalist(const void *str_res, struct vdec_raw_bstr_data *rawsei_datalist)
+{
+	/* Check input params. */
+	if (rawsei_datalist && str_res) {
+		struct vdec_raw_bstr_data *sei_raw_datacurr = NULL;
+
+		/* Start from the first element... */
+		sei_raw_datacurr = rawsei_datalist;
+		/* Free all the linked raw SEI data containers. */
+		while (sei_raw_datacurr) {
+			struct vdec_raw_bstr_data *seiraw_datanext =
+				sei_raw_datacurr->next;
+			bspp_freeraw_sei_datacontainer(str_res, sei_raw_datacurr);
+			sei_raw_datacurr = seiraw_datanext;
+		}
+	}
+}
+
+void bspp_streamrelese_rawbstrdataplain(const void *str_res, const void *rawdata)
+{
+	struct bspp_stream_alloc_data *str_alloc =
+		(struct bspp_stream_alloc_data *)str_res;
+	struct bspp_raw_bitstream_data *rawbstrdata =
+		(struct bspp_raw_bitstream_data *)rawdata;
+
+	if (rawbstrdata) {
+		/* Decrement the raw bitstream data reference count. */
+		rawbstrdata->ref_count--;
+		/* If no entity is referencing the raw
+		 * bitstream data any more
+		 */
+		if (rawbstrdata->ref_count == 0) {
+			/* ... free the raw bitstream data buffer... */
+			kfree(rawbstrdata->raw_bitstream_data.data);
+			memset(&rawbstrdata->raw_bitstream_data, 0,
+			       sizeof(rawbstrdata->raw_bitstream_data));
+			/* ...and return it to the list. */
+			lst_remove(&str_alloc->raw_data_list_used, rawbstrdata);
+			lst_add(&str_alloc->raw_data_list_available, rawbstrdata);
+		}
+	}
+}
+
+struct bspp_vps_info *bspp_get_vpshdr(void *str_res, unsigned int vps_id)
+{
+	struct bspp_stream_alloc_data *alloc_data =
+		(struct bspp_stream_alloc_data *)str_res;
+
+	if (vps_id >= VPS_SLOTS || !alloc_data)
+		return NULL;
+
+	return lst_last(&alloc_data->vps_data_list[vps_id]);
+}
diff --git a/drivers/staging/media/vxd/decoder/bspp.h b/drivers/staging/media/vxd/decoder/bspp.h
new file mode 100644
index 000000000000..2198d9d6966e
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/bspp.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Bitstream Buffer Pre-Parser
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __BSPP_H__
+#define __BSPP_H__
+
+#include <linux/types.h>
+
+#include "h264fw_data.h"
+#include "lst.h"
+#include "vdec_defs.h"
+
+/*
+ * There are up to 2 pictures in each buffer
+ * (plus trailing data for the next picture, e.g. PPS).
+ */
+#define BSPP_MAX_PICTURES_PER_BUFFER 3
+
+#define BSPP_INVALID ((unsigned int)(-1))
+
+/*
+ * This enables signalling of a closed GOP at every I-frame, adding
+ * resilience to the seeking functionality.
+ */
+#define I_FRAME_SIGNALS_CLOSED_GOP
+
+/*
+ * enum bspp_error_type - enumeration of parsing errors; a different error
+ *	flag is used for each kind of data unit
+ */
+enum bspp_error_type {
+	/* No Error in parsing. */
+	BSPP_ERROR_NONE                      = (0),
+	/* Correction in VSH; the faulty VSH was replaced */
+	BSPP_ERROR_CORRECTION_VSH            = (1 << 0),
+	/*
+	 * Correction in parsed value; the value is clamped if it goes
+	 * beyond the limit
+	 */
+	BSPP_ERROR_CORRECTION_VALIDVALUE     = (1 << 1),
+	/* Error in aux data (e.g. PPS in H.264) parsing */
+	BSPP_ERROR_AUXDATA                   = (1 << 2),
+	/* Error in parsing; more data remains in the VSH data unit after parsing */
+	BSPP_ERROR_DATA_REMAINS              = (1 << 3),
+	/* Error in parsing; parsed codeword is invalid */
+	BSPP_ERROR_INVALID_VALUE             = (1 << 4),
+	/* Error in parsing; generic decode error */
+	BSPP_ERROR_DECODE                    = (1 << 5),
+	/* reference frame is not available for decoding */
+	BSPP_ERROR_NO_REF_FRAME              = (1 << 6),
+	/* Non IDR frame loss detected */
+	BSPP_ERROR_NONIDR_FRAME_LOSS         = (1 << 7),
+	/* IDR frame loss detected */
+	BSPP_ERROR_IDR_FRAME_LOSS            = (1 << 8),
+	/* Error in parsing; insufficient data to complete parsing */
+	BSPP_ERROR_INSUFFICIENT_DATA         = (1 << 9),
+	/* Severe error indicating no support for this picture data */
+	BSPP_ERROR_UNSUPPORTED               = (1 << 10),
+	/* Severe error from which parsing could not recover */
+	BSPP_ERROR_UNRECOVERABLE             = (1 << 11),
+	/* Severe Error, to indicate that NAL Header is absent after SCP */
+	BSPP_ERROR_NO_NALHEADER              = (1 << 12),
+	BSPP_ERROR_NO_SEQUENCE_HDR           = (1 << 13),
+	BSPP_ERROR_SIGNALED_IN_STREAM        = (1 << 14),
+	BSPP_ERROR_UNKNOWN_DATAUNIT_DETECTED = (1 << 15),
+	BSPP_ERROR_NO_PPS                    = (1 << 16),
+	BSPP_ERROR_NO_VPS                    = (1 << 17),
+	BSPP_ERROR_OUT_OF_MEMORY             = (1 << 18),
+	/* The shift value of the last error bit */
+	BSPP_ERROR_MAX_SHIFT                 = 18,
+	BSPP_ERROR_FORCE32BITS               = 0x7FFFFFFFU
+};
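+
+/*
+ * These flags are accumulated with bitwise OR while a unit is parsed
+ * (e.g. "parse_error |= BSPP_ERROR_INVALID_VALUE;"), so a single flags
+ * word can report several independent problems for the same data unit.
+ */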
+
+/*
+ * struct bspp_ddbuf_info - Buffer info
+ * @buf_size: The size of the buffer (in bytes)
+ * @cpu_virt_addr: The CPU virtual address (mapped into the local CPU MMU)
+ * @mem_attrib: Memory attributes
+ * @buf_id: Buffer id
+ * @bufmap_id: Buffer mapping id
+ */
+struct bspp_ddbuf_info {
+	unsigned int buf_size;
+	void *cpu_virt_addr;
+	enum sys_emem_attrib mem_attrib;
+	unsigned int buf_id;
+	unsigned int bufmap_id;
+};
+
+/*
+ * struct bspp_ddbuf_array_info - Buffer array info
+ * @ddbuf_info: Buffer info (container)
+ * @buf_element_size: Size of each element
+ * @buf_offset: Offset for each element
+ */
+struct bspp_ddbuf_array_info {
+	struct bspp_ddbuf_info ddbuf_info;
+	unsigned int buf_element_size;
+	unsigned int buf_offset;
+};
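+
+/*
+ * A plausible reading of the layout above (an assumption, not stated in
+ * this header): element i of the array would start at
+ * ddbuf_info.cpu_virt_addr + buf_offset + i * buf_element_size.
+ */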
+
+/**
+ * struct bspp_bitstr_seg - Bitstream segment
+ * @lst_padding: Reserved for list linkage (kept as the first member)
+ * @data_size: Size of data
+ * @data_byte_offset: Offset for data
+ * @bstr_seg_flag: flag indicates the bitstream segment type
+ * @start_code_suffix: Start code suffix
+ * @bufmap_id: Buffer map ID
+ */
+struct bspp_bitstr_seg {
+	void *lst_padding;
+	unsigned int data_size;
+	unsigned int data_byte_offset;
+	unsigned int bstr_seg_flag;
+	unsigned char start_code_suffix;
+	unsigned int bufmap_id;
+};
+
+/*
+ * struct bspp_pict_data - Picture Header Data Information
+ * @bufmap_id: Buffer ID to use inside kernel #VXDIO_sDdBufInfo
+ * @buf_offset: Buffer offset (for packed device buffers, e.g. PPS)
+ * @pic_data: Picture data
+ * @size: Size (in bytes) of data.
+ * @id: Data identifier.
+ */
+struct bspp_pict_data {
+	unsigned int bufmap_id;
+	unsigned int buf_offset;
+	void *pic_data;
+	unsigned int size;
+	unsigned int id;
+};
+
+/*
+ * struct bspp_pict_hdr_info - Picture Header Information
+ */
+struct bspp_pict_hdr_info {
+	/*
+	 * Picture is entirely intra-coded and doesn't use any reference data.
+	 * NOTE: should be IMG_FALSE if this cannot be determined.
+	 */
+	int intra_coded;
+	/* Picture might be referenced by subsequent pictures. */
+	int ref;
+	/* Picture is a field as part of a frame. */
+	int field;
+	/* Emulation prevention bytes are present in picture data. */
+	int emulation_prevention;
+	/* Post Processing */
+	int post_processing;
+	/* Macroblocks within the picture may not occur in raster-scan order */
+	int discontinuous_mbs;
+	/* Flag to indicate the data spans multiple buffers. */
+	int fragmented_data;
+	/* SOS fields count value */
+	unsigned char sos_count;
+	/* Whether this picture is the first of the sequence */
+	int first_pic_of_sequence;
+
+	enum vdecfw_parsermode parser_mode;
+	/* Total size of picture data which is going to be submitted. */
+	unsigned int pic_data_size;
+	/* Size of coded frame as specified in the bitstream. */
+	struct vdec_pict_size coded_frame_size;
+	/* Display information for picture */
+	struct vdec_pict_disp_info disp_info;
+
+	/* Picture auxiliary data (e.g. H.264 SPS/PPS) */
+	struct bspp_pict_data pict_aux_data;
+	/* Picture auxiliary data (e.g. H.264 SPS/PPS) for 2nd picture */
+	struct bspp_pict_data second_pict_aux_data;
+	/* Slice group-map data. */
+	struct bspp_pict_data pict_sgm_data;
+#ifdef HAS_JPEG
+	/* JPEG specific picture header information.*/
+	struct vdec_jpeg_pict_hdr_info jpeg_pict_hdr_info;
+#endif
+
+	struct h264_pict_hdr_info {
+		void *raw_vui_data;
+		void *raw_sei_data_list_first_field;
+		void *raw_sei_data_list_second_field;
+		unsigned char nal_ref_idc;
+		unsigned short frame_num;
+	} h264_pict_hdr_info;
+
+	struct {        /* HEVC specific frame information.*/
+		int range_ext_present;
+		int is_full_range_ext;
+		void *raw_vui_data;
+		void *raw_sei_datalist_firstfield;
+		void *raw_sei_datalist_secondfield;
+	} hevc_pict_hdr_info;
+};
+
+/*
+ * struct bspp_sequ_hdr_info - Sequence header information
+ */
+struct bspp_sequ_hdr_info {
+	unsigned int sequ_hdr_id;
+	unsigned int ref_count;
+	struct vdec_comsequ_hdrinfo com_sequ_hdr_info;
+	unsigned int bufmap_id;
+	unsigned int buf_offset;
+};
+
+/*
+ * struct bspp_picture_data - Picture data
+ */
+struct bspp_picture_data {
+	/* Anonymous */
+	/*
+	 * Bitstream segments that contain other (non-picture) data before
+	 * the picture in the buffer (elements of type #VDECDD_sBitStrSeg).
+	 */
+	struct lst_t pre_pict_seg_list[BSPP_MAX_PICTURES_PER_BUFFER];
+	/* Picture */
+	unsigned int sequ_hdr_id;
+	struct bspp_pict_hdr_info pict_hdr_info;
+	/*
+	 * Bitstream segments that contain picture data, one for each field
+	 * (if present) in the same group of buffers (elements of type
+	 * #VDECDD_sBitStrSeg).
+	 */
+	struct lst_t pict_seg_list[BSPP_MAX_PICTURES_PER_BUFFER];
+	void *pict_tag_param[BSPP_MAX_PICTURES_PER_BUFFER];
+	int is_prefix;
+};
+
+/*
+ * struct bspp_preparsed_data - Pre-parsed buffer information
+ */
+struct bspp_preparsed_data {
+	/* Sequence */
+	int new_sequence;
+	struct bspp_sequ_hdr_info sequ_hdr_info;
+	int sequence_end;
+
+	/* Closed GOP */
+	int closed_gop;
+
+	/* Picture */
+	int new_picture;
+	int new_fragment;
+	struct bspp_picture_data picture_data;
+
+	/* Additional pictures (MVC extension) */
+	int new_sub_sequence;
+	struct bspp_sequ_hdr_info ext_sequ_hdr_info;
+	/* non-base view pictures + picture prefix for next frame */
+	struct bspp_picture_data ext_pictures_data[VDEC_H264_MVC_MAX_VIEWS];
+	unsigned int num_ext_pictures;
+
+	/*
+	 * Additional information
+	 * Flags word to indicate error in parsing/decoding - see
+	 * #VDEC_eErrorType
+	 */
+	unsigned int error_flags;
+};
+
+/*
+ * struct bspp_picture_decoded - used to store picture-decoded information for
+ * resource handling (sequences/PPSs)
+ */
+struct bspp_picture_decoded {
+	void **lst_link;
+	unsigned int sequ_hdr_id;
+	unsigned int pps_id;
+	unsigned int second_pps_id;
+	int not_decoded;
+	struct vdec_raw_bstr_data *sei_raw_data_first_field;
+	struct vdec_raw_bstr_data *sei_raw_data_second_field;
+};
+
+/*
+ * @Function	bspp_stream_create
+ * @Description	Creates a stream context for which to pre-parse bitstream
+ *		buffers. The following allocations will take place:
+ *		- Local storage for high-level header parameters (secure)
+ *		- Host memory for common sequence information (insecure)
+ *		- Device memory for Sequence information (secure)
+ *		- Device memory for PPS (secure, H.264 only)
+ * @Input	str_config_data : config data corresponding to bitstream
+ * @Output	str_context : A pointer used to return the stream context handle
+ * @Input	fw_sequ: FW sequence data
+ * @Input	fw_pps: FW pps data
+ * @Return	This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_stream_create(const struct vdec_str_configdata *str_config_data,
+		       void **str_context,
+		       struct bspp_ddbuf_array_info fw_sequ[],
+		       struct bspp_ddbuf_array_info fw_pps[]);
+
+/*
+ * @Function	bspp_set_codec_config
+ * @Description	This function is used to set the out-of-band codec config data.
+ * @Input	str_context_handle     : Stream context handle.
+ * @Input	codec_config   : Codec-config data
+ * @Return	This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_set_codec_config(const void *str_context_handle,
+			  const struct vdec_codec_config *codec_config);
+
+/*
+ * @Function	bspp_stream_destroy
+ * @Description	Destroys a stream context used to pre-parse bitstream buffers.
+ * @Input	str_context_handle  : Stream context handle.
+ * @Return	This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_stream_destroy(void *str_context_handle);
+
+/*
+ * @Function	bspp_submit_picture_decoded
+ * @Description	Signals that a picture has been decoded so that associated
+ *		stream resources (sequence/PPS data) can be released.
+ */
+int bspp_submit_picture_decoded(void *str_context_handle,
+				struct bspp_picture_decoded *picture_decoded);
+
+/*
+ * @Function	bspp_stream_submit_buffer
+ * @Description	Submits a bitstream buffer to the pre-parser for processing.
+ */
+int bspp_stream_submit_buffer(void *str_context_handle,
+			      const struct bspp_ddbuf_info *ddbuf_info,
+			      unsigned int bufmap_id,
+			      unsigned int data_size,
+			      void *pict_tag_param,
+			      enum vdec_bstr_element_type bstr_element_type);
+
+/*
+ * @Function	bspp_stream_preparse_buffers
+ * @Description	Pre-parses a bitstream buffer and returns picture information in a
+ *		structure that also signals when the buffer is last in picture.
+ * @Input	str_context_handle: Stream context handle.
+ * @Input	contiguous_buf_info : Contiguous buffer information for
+ *		multiple segments that may be non-contiguous in memory
+ * @Input	contiguous_buf_map_id : Contiguous Buffer Map id
+ * @Input	segments: Pointer to a list of segments (see #VDECDD_sBitStrSeg)
+ * @Output	preparsed_data: Container to return picture information. Only
+ *		provide when buffer is last in picture (see #bForceEop in
+ *		function #VDEC_StreamSubmitBstrBuf)
+ * @Input	eos_flag: Flag that indicates end of stream
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+int bspp_stream_preparse_buffers
+	(void *str_context_handle,
+	const struct bspp_ddbuf_info *contiguous_buf_info,
+	unsigned int contiguous_buf_map_id,
+	struct lst_t *segments,
+	struct bspp_preparsed_data *preparsed_data,
+	int eos_flag);
+
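+/*
+ * Typical call flow (an illustrative sketch inferred from the API above,
+ * not taken from the original sources):
+ *
+ *	void *str_ctx;
+ *
+ *	bspp_stream_create(&str_cfg, &str_ctx, fw_sequ, fw_pps);
+ *	bspp_set_codec_config(str_ctx, &codec_cfg);
+ *	bspp_stream_submit_buffer(str_ctx, &buf_info, bufmap_id, data_size,
+ *				  pict_tag, element_type);
+ *	bspp_stream_preparse_buffers(str_ctx, NULL, 0, &segments,
+ *				     &preparsed, 0);
+ *	... decode, then on completion ...
+ *	bspp_submit_picture_decoded(str_ctx, &pict_decoded);
+ *	bspp_stream_destroy(str_ctx);
+ *
+ * str_cfg, codec_cfg, buf_info, segments, preparsed and pict_decoded stand
+ * for caller-owned variables and are named here for illustration only.
+ */
+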
+#endif /* __BSPP_H__   */
diff --git a/drivers/staging/media/vxd/decoder/bspp_int.h b/drivers/staging/media/vxd/decoder/bspp_int.h
new file mode 100644
index 000000000000..e37c8c9c415b
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/bspp_int.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * VXD Bitstream Buffer Pre-Parser Internal
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Amit Makani <amit.makani@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __BSPP_INT_H__
+#define __BSPP_INT_H__
+
+#include "bspp.h"
+#include "swsr.h"
+
+#define VDEC_MB_DIMENSION  (16)
+#define MAX_COMPONENTS  (4)
+
+#define print_value(a, ...)
+
+#define BSPP_DEFAULT_SEQUENCE_ID   (0)
+
+enum bspp_unit_type {
+	BSPP_UNIT_NONE = 0,
+	/* Only relevant for HEVC. */
+	BSPP_UNIT_VPS,
+	/* Only relevant for H.264 and HEVC */
+	BSPP_UNIT_SEQUENCE, BSPP_UNIT_PPS,
+	/*
+	 * Data from these units should be placed in non-picture bitstream
+	 *  segment lists. In conformant streams these units should not occur
+	 *  in-between the picture data.
+	 */
+	BSPP_UNIT_PICTURE,
+	BSPP_UNIT_SKIP_PICTURE,
+	BSPP_UNIT_NON_PICTURE,
+	BSPP_UNIT_UNCLASSIFIED,
+	/* Unit is unsupported, don't change segment list */
+	BSPP_UNIT_UNSUPPORTED,
+	BSPP_UNIT_MAX,
+	BSPP_UNIT_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct bspp_raw_bitstream_data {
+	void **lst_link;
+	unsigned int ref_count;
+	struct vdec_raw_bstr_data raw_bitstream_data;
+};
+
+/*
+ * struct bspp_h264_inter_pict_ctx
+ * @Brief: This structure contains H264 state to be retained between pictures.
+ */
+struct bspp_h264_inter_pict_ctx {
+	/*
+	 *  The following get applied to every picture until updated
+	 * (bitstream properties)
+	 */
+	int disable_vdmc_filt;
+	int b4x4transform_mb_unavailable;
+	/*
+	 *  The following get applied to the next picture only
+	 * (picture properties)
+	 */
+	int repeat_first_field;
+	unsigned int max_frm_repeat;
+	/*
+	 *  Control variable to decide when to attach the SEI info
+	 * (picture properties) to a picture
+	 */
+	int sei_info_attached_to_pic;
+	/*
+	 * The following variable is an approximation because we cannot
+	 * parse out-of-order; it takes a value as described:
+	 *	 1) Initially it is BSPP_INVALID
+	 *	 2) The first SPS sets it to its SPSid
+	 *	 3) The last bspp_H264SeiBufferingPeriod sets it, and it is used
+	 * for every SEI parsing until updated by another
+	 * bspp_H264SeiBufferingPeriod message
+	 */
+	unsigned int active_sps_for_sei_parsing;
+	unsigned short current_view_id;
+	struct vdec_raw_bstr_data *sei_raw_data_list;
+};
+
+/* This structure contains HEVC state to be retained between pictures. */
+struct bspp_hevc_inter_pict_ctx {
+	/* Picture count in a sequence */
+	unsigned int seq_pic_count;
+	struct {
+		/* There was EOS NAL detected and no new picture yet */
+		/* An EOS NAL was detected and no new picture seen yet */
+		/* This is first picture after EOS NAL */
+		unsigned first_after_eos : 1;
+	};
+
+	/* control variable to decide when to attach the SEI info
+	 * (picture properties) to a picture.
+	 */
+	unsigned char sei_info_attached_to_pic;
+	/* Raw SEI list to be attached to a picture. */
+	struct vdec_raw_bstr_data *sei_rawdata_list;
+	/* Handle to a picture header field to attach the raw SEI list to. */
+	void **hndl_pichdr_sei_rawdata_list;
+};
+
+/*
+ * struct bspp_inter_pict_data
+ * @Brief	This structure contains state to be retained between pictures.
+ */
+struct bspp_inter_pict_data {
+	/* A closed GOP has occurred in the bitstream. */
+	int seen_closed_gop;
+	/* Closed GOP has been signaled by a unit before the next picture */
+	int new_closed_gop;
+	/* Indicates whether or not DPB flush is needed */
+	int not_dpb_flush;
+	struct lst_t pic_prefix_seg;
+	union {
+		struct bspp_h264_inter_pict_ctx h264_ctx;
+		struct bspp_hevc_inter_pict_ctx hevc_ctx;
+	};
+};
+
+/*
+ * struct bspp_parse_state
+ * @Brief	This structure contains parse state
+ */
+struct bspp_parse_state {
+	struct bspp_inter_pict_data *inter_pict_ctx;
+	int initialised;
+
+	/* Input/Output (H264 etc. state). */
+	/* For SCP ASO detection we need to log 3 components */
+	unsigned int prev_first_mb_in_slice[MAX_COMPONENTS];
+	struct bspp_pict_hdr_info *next_pict_hdr_info;
+	unsigned char prev_bottom_pic_flag;
+	unsigned char second_field_flag;
+	unsigned char next_pic_is_new;
+	unsigned int prev_frame_num;
+	unsigned int prev_pps_id;
+	unsigned int prev_field_pic_flag;
+	unsigned int prev_nal_ref_idc;
+	unsigned int prev_pic_order_cnt_lsb;
+	int prev_delta_pic_order_cnt_bottom;
+	int prev_delta_pic_order_cnt[2];
+	int prev_nal_unit_type;
+	int prev_idr_pic_id;
+	int discontinuous_mb;
+	/* Position in bitstream before parsing a unit */
+	unsigned long long prev_byte_offset_buf;
+	unsigned int prev_buf_map_id;
+	unsigned int prev_buf_data_size;
+	/*
+	 * Flags word to indicate error in parsing/decoding
+	 * - see #VDEC_eErrorType.
+	 */
+	unsigned int error_flags;
+	/* Outputs. */
+	int new_closed_gop;
+	unsigned char new_view;
+	unsigned char is_prefix;
+	int first_chunk;
+};
+
+/*
+ * struct bspp_pps_info
+ * @Brief	Contains PPS information
+ */
+struct bspp_pps_info {
+	void **lst_link;
+	/* PPS Id. INSECURE MEMORY HOST */
+	unsigned int pps_id;
+	/* Reference count for PPS. INSECURE MEMORY HOST */
+	unsigned int ref_count;
+	struct bspp_ddbuf_array_info fw_pps;
+	/* Buffer ID to be used in Kernel */
+	unsigned int bufmap_id;
+	/* Parsing Info.    SECURE MEMORY HOST   */
+	void *secure_pps_info;
+	/* Buffer Offset to be used in kernel */
+	unsigned int buf_offset;
+};
+
+/*
+ * struct bspp_sequence_hdr_info
+ * @Brief	Contains SPS information
+ */
+struct bspp_sequence_hdr_info {
+	void **lst_link;
+	/* Reference count for sequence header */
+	unsigned int ref_count;
+	struct bspp_sequ_hdr_info sequ_hdr_info;
+	struct bspp_ddbuf_array_info fw_sequence;
+	/* Parsing Info.  SECURE MEMORY HOST */
+	void *secure_sequence_info;
+};
+
+enum bspp_element_status {
+	BSPP_UNALLOCATED = 0,
+	BSPP_AVAILABLE,
+	BSPP_UNAVAILABLE,
+	BSPP_STATUSMAX,
+	BSPP_FORCE32BITS = 0x7FFFFFFFU
+};
+
+struct bspp_vps_info {
+	void **lst_link;
+	/* VPS Id   INSECURE MEMORY HOST */
+	unsigned int vps_id;
+	/* Reference count for video header. INSECURE MEMORY HOST */
+	unsigned int ref_count;
+	/* Parsing Info. SECURE MEMORY HOST */
+	void *secure_vpsinfo;
+};
+
+/*
+ * struct bspp_unit_data
+ * @Brief	Contains bitstream unit data
+ */
+struct bspp_unit_data {
+	/* Input. */
+	/* Indicates which output data to populate */
+	enum bspp_unit_type unit_type;
+	/* Video Standard of unit to parse */
+	enum vdec_vid_std vid_std;
+	/* Indicates whether delimiter is present for unit */
+	int delim_present;
+	/* Codec configuration used by this stream */
+	const struct vdec_codec_config *codec_config;
+	void *str_res_handle;
+	/* Needed for calculating the size of the last fragment */
+	unsigned int unit_data_size;
+	/* Input/Output. */
+	struct bspp_parse_state *parse_state;
+	/* Output */
+	/* eVidStd == VDEC_STD_H263 && BSPP_UNIT_PICTURE. */
+	struct bspp_sequence_hdr_info *impl_sequ_hdr_info;
+	/* Union of output data for each of the unit types. */
+	union {
+		/* BSPP_UNIT_SEQUENCE. */
+		struct bspp_sequence_hdr_info *sequ_hdr_info;
+		/* BSPP_UNIT_PPS. */
+		struct bspp_pps_info *pps_info;
+		/* BSPP_UNIT_PICTURE. */
+		struct bspp_pict_hdr_info *pict_hdr_info;
+		/* For Video Header (HEVC) */
+		struct bspp_vps_info *vps_info;
+	} out;
+
+	/*
+	 * For picture it should give the SequenceHdrId, for anything
+	 * else it should contain BSPP_INVALID. This value is pre-loaded
+	 * with the sequence ID of the last picture.
+	 */
+	unsigned int pict_sequ_hdr_id;
+	/* State: output. */
+	/*
+	 * Picture unit (BSPP_UNIT_PICTURE) contains slice data.
+	 * Picture header information must be populated once this unit has been
+	 * parsed.
+	 */
+	int slice;
+	int ext_slice; /* Current slice belongs to non-base view (MVC only) */
+	/*
+	 * True if we meet a unit that signifies closed gop, different
+	 * for each standard.
+	 */
+	int new_closed_gop;
+	/* True if the end of a sequence of pictures has been reached. */
+	int sequence_end;
+	/*
+	 * Extracted all data from unit whereby shift-register should now
+	 * be at the next delimiter or end of data (when byte-aligned).
+	 */
+	int extracted_all_data;
+	/* Indicates the presence of any errors while processing this unit. */
+	enum bspp_error_type parse_error;
+	/* To turn on/off considering I-Frames as ClosedGop boundaries. */
+	int intra_frm_as_closed_gop;
+};
+
+/*
+ * struct bspp_swsr_ctx
+ * @brief	BSPP Software Shift Register Context Information
+ */
+struct bspp_swsr_ctx {
+	/*
+	 * Default configuration for the shift-register for this
+	 * stream. The delimiter type may be adjusted for each unit
+	 * where the buffer requires it. Information about how to
+	 * process each unit will be passed down with the picture
+	 * header information.
+	 */
+	struct swsr_config sr_config;
+	/*
+	 * Emulation prevention scheme present in bitstream. This is
+	 * sometimes not ascertained (e.g. VC-1) until the first
+	 * bitstream buffer (often codec configuration) has been
+	 * received.
+	 */
+	enum swsr_emprevent emulation_prevention;
+	/* Software shift-register context. */
+	void *swsr_context;
+};
+
+/*
+ * struct bspp_vid_std_features
+ * @brief  BSPP Video Standard Specific Features and Information
+ */
+struct bspp_vid_std_features {
+	/* The size of the sequence header structure for this video standard */
+	unsigned long seq_size;
+	/* This video standard uses Picture Parameter Sets. */
+	int uses_pps;
+	/*
+	 * The size of the Picture Parameter Sets structure for
+	 * this video standard.
+	 */
+	unsigned long pps_size;
+	/* This video standard uses Video Parameter Sets. */
+	int uses_vps;
+	/*
+	 * The size of the Video Parameter Sets structure for
+	 * this video standard
+	 */
+	unsigned long vps_size;
+};
+
+/*
+ * @Function	bspp_cb_parse_unit
+ * @Description	Function prototype for the parse unit callback functions.
+ * @Input	swsr_context_handle: A handle to software shift-register context
+ * @InOut	unit_data: A pointer to unit data which includes input & output
+ *		parameters as defined by structure.
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_parse_unit)(void *swsr_context_handle,
+				    struct bspp_unit_data *unit_data);
+
+/*
+ * @Function	bspp_cb_release_data
+ * @Description	This is a function prototype for the data releasing callback
+ *		functions.
+ * @Input	str_alloc_handle   : A handle to stream related resources.
+ * @Input	data_type   : A type of data which is to be released.
+ * @Input	data_handle : A handle for data which is to be released.
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_release_data)(void *str_alloc_handle,
+				      enum bspp_unit_type data_type,
+				      void *data_handle);
+
+/*
+ * @Function	bspp_cb_reset_data
+ * @Description	This is a function prototype for the data resetting callback
+ *		functions.
+ * @Input	data_type   : A type of data which is to be reset.
+ * @InOut	data_handle : A handle for data which is to be reset.
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_reset_data)(enum bspp_unit_type data_type,
+				    void *data_handle);
+
+/*
+ * @Function	bspp_cb_destroy_data
+ * @Description	This is a function prototype for the data destruction callback
+ *		functions.
+ * @Input	data_type   : A type of data which is to be destroyed.
+ * @InOut	data_handle : A handle for data which is to be destroyed.
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_destroy_data)(enum bspp_unit_type data_type,
+				      void *data_handle);
+
+/*
+ * @Function	bspp_cb_parse_codec_config
+ * @Description	This is a function prototype for parsing codec config bitstream
+ *		element for size delimited bitstreams.
+ * @Input	swsr_context_handle: A handle to Shift Register processing
+ *		current bitstream.
+ * @Output	unit_count: A pointer to variable in which to return unit count.
+ * @Output	unit_array_count: A pointer to variable in which to return unit
+ *		array count.
+ * @Output	delim_length: A pointer to variable in which to return NAL
+ *		delimiter length in bits.
+ * @Output	size_delim_length: A pointer to variable in which to return size
+ *		delimiter length in bits.
+ * @Return	None.
+ */
+typedef void (*bspp_cb_parse_codec_config)(void *swsr_context_handle,
+					   unsigned int *unit_count,
+					   unsigned int *unit_array_count,
+					   unsigned int *delim_length,
+					   unsigned int *size_delim_length);
+
+/*
+ * @Function	bspp_cb_update_unit_counts
+ * @Description	This is a function prototype for updating unit counts for size
+ *		delimited bitstreams.
+ * @Input	swsr_context_handle: A handle to Shift Register processing
+ *		current bitstream.
+ * @InOut	unit_count: A pointer to variable holding current unit count
+ * @InOut	unit_array_count: A pointer to variable holding current unit
+ *		array count.
+ * @Return	None.
+ */
+typedef void (*bspp_cb_update_unit_counts)(void *swsr_context_handle,
+					   unsigned int *unit_count,
+					   unsigned int *unit_array_count);
+
+/*
+ * @Function	bspp_cb_initialise_parsing
+ * @Description	This prototype is for unit group parsing initialization.
+ * @InOut	parse_state: The current unit group parsing state.
+ * @Return	None.
+ */
+typedef void (*bspp_cb_initialise_parsing)(struct bspp_parse_state *parse_state);
+
+/*
+ * @Function	bspp_cb_finalise_parsing
+ * @Description	This prototype is for unit group parsing finalization.
+ * @Input	str_alloc_handle: A handle to stream related resources.
+ * @InOut	parse_state: The current unit group parsing state.
+ * @Return	None.
+ */
+typedef void (*bspp_cb_finalise_parsing)(void *str_alloc_handle,
+					 struct bspp_parse_state *parse_state);
+
+/*
+ * struct bspp_parser_callbacks
+ * @brief	BSPP Standard Related Parser Callback Functions
+ */
+struct bspp_parser_callbacks {
+	/* Pointer to standard-specific unit parsing callback function. */
+	bspp_cb_parse_unit parse_unit_cb;
+	/* Pointer to standard-specific data releasing callback function. */
+	bspp_cb_release_data release_data_cb;
+	/* Pointer to standard-specific data resetting callback function. */
+	bspp_cb_reset_data reset_data_cb;
+	/* Pointer to standard-specific data destruction callback function. */
+	bspp_cb_destroy_data destroy_data_cb;
+	/* Pointer to standard-specific codec config parsing callback function */
+	bspp_cb_parse_codec_config parse_codec_config_cb;
+	/* Pointer to standard-specific unit count updating callback function */
+	bspp_cb_update_unit_counts update_unit_counts_cb;
+	/*
+	 * Pointer to standard-specific unit group parsing initialization
+	 * function.
+	 */
+	bspp_cb_initialise_parsing initialise_parsing_cb;
+	/*
+	 * Pointer to standard-specific unit group parsing finalization
+	 * function
+	 */
+	bspp_cb_finalise_parsing finalise_parsing_cb;
+};
+
+/*
+ * @Function	bspp_cb_set_parser_config
+ * @Description	Prototype is for the setting parser config callback functions.
+ * @Input	bstr_format: Input bitstream format.
+ * @Output	vid_std_features: Features of video standard for this bitstream.
+ * @Output	swsr_ctx: Software Shift Register settings for this bitstream.
+ * @Output	parser_callbacks: Parser functions to be used for parsing this
+ *		bitstream.
+ * @Output	inter_pict_data: Inter-picture settings specific for this
+ *		bitstream.
+ * @Return	int : This function returns either IMG_SUCCESS or an error code.
+ */
+typedef int (*bspp_cb_set_parser_config)(enum vdec_bstr_format bstr_format,
+					   struct bspp_vid_std_features *vid_std_features,
+					   struct bspp_swsr_ctx *swsr_ctx,
+					   struct bspp_parser_callbacks *parser_callbacks,
+					   struct bspp_inter_pict_data *inter_pict_data);
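+
+/*
+ * Note (inferred from the callback contracts above): each supported video
+ * standard is expected to provide one bspp_cb_set_parser_config
+ * implementation that fills in its bspp_vid_std_features, its default
+ * bspp_swsr_ctx, its bspp_parser_callbacks table and any
+ * bspp_inter_pict_data state; BSPP then drives all parsing for that
+ * stream through the returned callback table.
+ */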
+
+/*
+ * @Function	bspp_cb_determine_unit_type
+ * @Description	This is a function prototype for determining the BSPP unit type
+ *		based on the bitstream (video standard specific) unit type
+ *		callback functions.
+ * @Input	bitstream_unit_type: Bitstream (video standard specific) unit
+ *		type.
+ * @Input	disable_mvc: Skip MVC related units (relevant for standards
+ *		that support it).
+ * @InOut	bspp_unit_type *: Last BSPP unit type on input. Current BSPP
+ *		unit type on output.
+ * @Return	None.
+ */
+typedef void (*bspp_cb_determine_unit_type)(unsigned char bitstream_unit_type,
+					    int disable_mvc,
+					    enum bspp_unit_type *bspp_unit_type);
+
+struct bspp_pps_info *bspp_get_pps_hdr(void *str_res_handle, unsigned int pps_id);
+
+struct bspp_sequence_hdr_info *bspp_get_sequ_hdr(void *str_res_handle,
+						 unsigned int sequ_id);
+
+struct bspp_vps_info *bspp_get_vpshdr(void *str_res, unsigned int vps_id);
+
+void bspp_streamrelese_rawbstrdataplain(const void *str_res,
+					const void *rawdata);
+
+void bspp_freeraw_sei_datacontainer(const void *str_res,
+				    struct vdec_raw_bstr_data *rawsei_datacontainer);
+
+void bspp_freeraw_sei_datalist(const void *str_res,
+			       struct vdec_raw_bstr_data *rawsei_datalist);
+
+#endif /* __BSPP_INT_H__   */
diff --git a/drivers/staging/media/vxd/decoder/h264_secure_parser.c b/drivers/staging/media/vxd/decoder/h264_secure_parser.c
new file mode 100644
index 000000000000..3973749eac58
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/h264_secure_parser.c
@@ -0,0 +1,3051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * h.264 secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp.h"
+#include "bspp_int.h"
+#include "h264_secure_parser.h"
+#include "pixel_api.h"
+#include "swsr.h"
+#include "vdec_defs.h"
+
+/*
+ * Reduce DPB to 1 when no pic reordering.
+ */
+#define SL_MAX_REF_IDX          32
+#define VUI_CPB_CNT_MAX         32
+#define MAX_SPS_COUNT           32
+#define MAX_PPS_COUNT           256
+/* changed from 810 */
+#define MAX_SLICE_GROUPMBS      65536
+#define MAX_SLICEGROUP_COUNT    8
+#define MAX_WIDTH_IN_MBS        256
+#define MAX_HEIGHT_IN_MBS       256
+#define MAX_COLOR_PLANE         4
+#define H264_MAX_SGM_SIZE       8196
+
+#define H264_MAX_CHROMA_QP_INDEX_OFFSET (12)
+#define H264_MIN_CHROMA_QP_INDEX_OFFSET (-12)
+
+/*
+ * AVC Profile IDC definitions
+ */
+enum h264_profile_idc {
+	h264_profile_cavlc444   = 44,   /*  YUV 4:4:4/14 "CAVLC 4:4:4" */
+	h264_profile_baseline   = 66,   /* YUV 4:2:0/8  "Baseline" */
+	h264_profile_main       = 77,   /* YUV 4:2:0/8  "Main" */
+	h264_profile_scalable   = 83,   /* YUV 4:2:0/8  "Scalable" */
+	h264_profile_extended   = 88,   /* YUV 4:2:0/8  "Extended" */
+	h264_profile_high       = 100,  /* YUV 4:2:0/8  "High" */
+	h264_profile_hig10     = 110,  /* YUV 4:2:0/10 "High 10" */
+	h264_profile_mvc_high   = 118,  /* YUV 4:2:0/8  "Multiview High" */
+	h264_profile_high422    = 122,  /* YUV 4:2:2/10 "High 4:2:2" */
+	h264_profile_mvc_stereo = 128,  /* YUV 4:2:0/8  "Stereo High" */
+	h264_profile_high444    = 244,  /* YUV 4:4:4/14 "High 4:4:4" */
+	h264_profile_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * Remap H.264 colour format into internal representation.
+ */
+static const enum pixel_fmt_idc pixel_format_idc[] = {
+	PIXEL_FORMAT_MONO,
+	PIXEL_FORMAT_420,
+	PIXEL_FORMAT_422,
+	PIXEL_FORMAT_444,
+};
+
+/*
+ * Pixel Aspect Ratio
+ */
+static const unsigned short pixel_aspect[17][2] = {
+	{ 0, 1 },
+	{ 1, 1 },
+	{ 12, 11 },
+	{ 10, 11 },
+	{ 16, 11 },
+	{ 40, 33 },
+	{ 24, 11 },
+	{ 20, 11 },
+	{ 32, 11 },
+	{ 80, 33 },
+	{ 18, 11 },
+	{ 15, 11 },
+	{ 64, 33 },
+	{ 160, 99 },
+	{ 4, 3 },
+	{ 3, 2 },
+	{ 2, 1 },
+};
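+
+/*
+ * Indices 0..16 follow Table E-1 (aspect_ratio_idc) of the H.264
+ * specification; idc 255 (Extended_SAR) is read explicitly from the
+ * bitstream in bspp_h264_vui_parser() below.
+ */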
+
+/*
+ * Table 7-3, 7-4: Default Scaling lists
+ */
+static const unsigned char default_4x4_intra[16] = {
+	6, 13, 13, 20,
+	20, 20, 28, 28,
+	28, 28, 32, 32,
+	32, 37, 37, 42
+};
+
+static const unsigned char default_4x4_inter[16] = {
+	10, 14, 14, 20,
+	20, 20, 24, 24,
+	24, 24, 27, 27,
+	27, 30, 30, 34
+};
+
+static const unsigned char default_8x8_intra[64] = {
+	6, 10, 10, 13, 11, 13, 16, 16,
+	16, 16, 18, 18, 18, 18, 18, 23,
+	23, 23, 23, 23, 23, 25, 25, 25,
+	25, 25, 25, 25, 27, 27, 27, 27,
+	27, 27, 27, 27, 29, 29, 29, 29,
+	29, 29, 29, 31, 31, 31, 31, 31,
+	31, 33, 33, 33, 33, 33, 36, 36,
+	36, 36, 38, 38, 38, 40, 40, 42
+};
+
+static const unsigned char default_8x8_inter[64] = {
+	9, 13, 13, 15, 13, 15, 17, 17,
+	17, 17, 19, 19, 19, 19, 19, 21,
+	21, 21, 21, 21, 21, 22, 22, 22,
+	22, 22, 22, 22, 24, 24, 24, 24,
+	24, 24, 24, 24, 25, 25, 25, 25,
+	25, 25, 25, 27, 27, 27, 27, 27,
+	27, 28, 28, 28, 28, 28, 30, 30,
+	30, 30, 32, 32, 32, 33, 33, 35
+};
+
+/*
+ * to be used if no quantization matrix is chosen
+ */
+static const unsigned char default_4x4_org[16] = {
+	16, 16, 16, 16,
+	16, 16, 16, 16,
+	16, 16, 16, 16,
+	16, 16, 16, 16
+};
+
+/*
+ * to be use if no q matrix is chosen
+ */
+static const unsigned char default_8x8_org[64] = {
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16,
+	16, 16, 16, 16, 16, 16, 16, 16
+};
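+
+/*
+ * The all-16 tables above are the flat scaling lists (Flat_4x4_16 and
+ * Flat_8x8_16) that the H.264 specification prescribes when no scaling
+ * matrix is signalled, i.e. quantization is left unweighted.
+ */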
+
+/*
+ * source: ITU-T H.264 2010/03, page 20 Table 6-1
+ */
+static const int bspp_subheightc[] = { -1, 2, 1, 1 };
+
+/*
+ * source: ITU-T H.264 2010/03, page 20 Table 6-1
+ */
+static const int bspp_subwidthc[] = { -1, 2, 2, 1 };
+
+/*
+ * inline functions for Minimum and Maximum value
+ */
+static inline unsigned int umin(unsigned int a, unsigned int b)
+{
+	return (((a) < (b)) ? (a) : (b));
+}
+
+static inline int smin(int a, int b)
+{
+	return (((a) < (b)) ? (a) : (b));
+}
+
+static inline int smax(int a, int b)
+{
+	return (((a) > (b)) ? (a) : (b));
+}
+
+static void set_if_not_determined_yet(int *determined,
+				      unsigned char condition,
+				      int *target,
+				      unsigned int value)
+{
+	if ((!(*determined)) && (condition)) {
+		*target = value;
+		*determined = 1;
+	}
+}
+
+static int bspp_h264_get_subwidthc(int chroma_format_idc, int separate_colour_plane_flag)
+{
+	return bspp_subwidthc[chroma_format_idc];
+}
+
+static int bspp_h264_get_subheightc(int chroma_format_idc, int separate_colour_plane_flag)
+{
+	return bspp_subheightc[chroma_format_idc];
+}
+
+static unsigned int h264ceillog2(unsigned int value)
+{
+	unsigned int status = 0;
+
+	value -= 1;
+	while (value > 0) {
+		value >>= 1;
+		status++;
+	}
+	return status;
+}
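+
+/*
+ * Worked examples: h264ceillog2(1) == 0, h264ceillog2(32) == 5 and
+ * h264ceillog2(33) == 6, i.e. the function computes ceil(log2(value)).
+ */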
+
+/*
+ * @Function              bspp_h264_set_default_vui
+ * @Description           Sets default values of the VUI info
+ */
+static void bspp_h264_set_default_vui(struct bspp_h264_vui_info *vui_info)
+{
+	unsigned int *nal_hrd_bitrate_valueminus1 = NULL;
+	unsigned int *vcl_hrd_bitrate_valueminus1 = NULL;
+	unsigned int *nal_hrd_cpbsize_valueminus1 = NULL;
+	unsigned int *vcl_hrd_cpbsize_valueminus1 = NULL;
+	unsigned char *nal_hrd_cbr_flag = NULL;
+	unsigned char *vcl_hrd_cbr_flag = NULL;
+
+	/* Saving pointers */
+	nal_hrd_bitrate_valueminus1 = vui_info->nal_hrd_parameters.bit_rate_value_minus1;
+	vcl_hrd_bitrate_valueminus1 = vui_info->vcl_hrd_parameters.bit_rate_value_minus1;
+
+	nal_hrd_cpbsize_valueminus1 = vui_info->nal_hrd_parameters.cpb_size_value_minus1;
+	vcl_hrd_cpbsize_valueminus1 = vui_info->vcl_hrd_parameters.cpb_size_value_minus1;
+
+	nal_hrd_cbr_flag = vui_info->nal_hrd_parameters.cbr_flag;
+	vcl_hrd_cbr_flag = vui_info->vcl_hrd_parameters.cbr_flag;
+
+	/* Cleaning sVUIInfo */
+	if (vui_info->nal_hrd_parameters.bit_rate_value_minus1)
+		memset(vui_info->nal_hrd_parameters.bit_rate_value_minus1, 0x00,
+		       VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (vui_info->nal_hrd_parameters.cpb_size_value_minus1)
+		memset(vui_info->nal_hrd_parameters.cpb_size_value_minus1, 0x00,
+		       VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (vui_info->vcl_hrd_parameters.cpb_size_value_minus1)
+		memset(vui_info->vcl_hrd_parameters.cpb_size_value_minus1, 0x00,
+		       VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (vui_info->nal_hrd_parameters.cbr_flag)
+		memset(vui_info->nal_hrd_parameters.cbr_flag, 0x00,
+		       VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+	if (vui_info->vcl_hrd_parameters.cbr_flag)
+		memset(vui_info->vcl_hrd_parameters.cbr_flag, 0x00,
+		       VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+	/* Make sure you set default for everything */
+	memset(vui_info, 0, sizeof(*vui_info));
+	vui_info->video_format                            = 5;
+	vui_info->colour_primaries                        = 2;
+	vui_info->transfer_characteristics                = 2;
+	vui_info->matrix_coefficients                     = 2;
+	vui_info->motion_vectors_over_pic_boundaries_flag = 1;
+	vui_info->max_bytes_per_pic_denom                 = 2;
+	vui_info->max_bits_per_mb_denom                   = 1;
+	vui_info->log2_max_mv_length_horizontal           = 16;
+	vui_info->log2_max_mv_length_vertical             = 16;
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+	vui_info->max_dec_frame_buffering                 = 1;
+	vui_info->num_reorder_frames                      = 0;
+#else
+	vui_info->max_dec_frame_buffering                 = 0;
+	vui_info->num_reorder_frames                      = vui_info->max_dec_frame_buffering;
+#endif
+
+	/* Restoring pointers */
+	vui_info->nal_hrd_parameters.bit_rate_value_minus1 = nal_hrd_bitrate_valueminus1;
+	vui_info->vcl_hrd_parameters.bit_rate_value_minus1 = vcl_hrd_bitrate_valueminus1;
+
+	vui_info->nal_hrd_parameters.cpb_size_value_minus1 = nal_hrd_cpbsize_valueminus1;
+	vui_info->vcl_hrd_parameters.cpb_size_value_minus1 = vcl_hrd_cpbsize_valueminus1;
+
+	vui_info->nal_hrd_parameters.cbr_flag = nal_hrd_cbr_flag;
+	vui_info->vcl_hrd_parameters.cbr_flag = vcl_hrd_cbr_flag;
+}
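+
+/*
+ * The pointer save/restore dance above is needed because the HRD
+ * bit_rate/cpb_size/cbr arrays are allocated lazily (see
+ * bspp_h264_hrd_param_parser() below) and must survive the memset that
+ * returns the rest of the VUI structure to its default state.
+ */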
+
+/*
+ * @Function              bspp_h264_hrd_param_parser
+ * @Description           Parse the HRD parameter
+ */
+static enum bspp_error_type bspp_h264_hrd_param_parser
+					(void *swsr_context,
+					 struct bspp_h264_hrdparam_info *h264_hrd_param_info)
+{
+	unsigned int sched_sel_idx;
+
+	VDEC_ASSERT(swsr_context);
+	h264_hrd_param_info->cpb_cnt_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+
+	if (h264_hrd_param_info->cpb_cnt_minus1 >= VUI_CPB_CNT_MAX)
+		pr_info("cpb_cnt_minus1 is not within the range");
+
+	h264_hrd_param_info->bit_rate_scale = swsr_read_bits(swsr_context, 4);
+	h264_hrd_param_info->cpb_size_scale = swsr_read_bits(swsr_context, 4);
+
+	if (!h264_hrd_param_info->bit_rate_value_minus1) {
+		h264_hrd_param_info->bit_rate_value_minus1 = kcalloc
+							(VDEC_H264_MAXIMUMVALUEOFCPB_CNT,
+							 sizeof(unsigned int), GFP_KERNEL);
+		VDEC_ASSERT(h264_hrd_param_info->bit_rate_value_minus1);
+		if (!h264_hrd_param_info->bit_rate_value_minus1)
+			return BSPP_ERROR_OUT_OF_MEMORY;
+	}
+
+	if (!h264_hrd_param_info->cpb_size_value_minus1) {
+		h264_hrd_param_info->cpb_size_value_minus1 = kcalloc
+							(VDEC_H264_MAXIMUMVALUEOFCPB_CNT,
+							 sizeof(unsigned int),
+							 GFP_KERNEL);
+		VDEC_ASSERT(h264_hrd_param_info->cpb_size_value_minus1);
+		if (!h264_hrd_param_info->cpb_size_value_minus1)
+			return BSPP_ERROR_OUT_OF_MEMORY;
+	}
+
+	if (!h264_hrd_param_info->cbr_flag) {
+		h264_hrd_param_info->cbr_flag =
+			kcalloc(VDEC_H264_MAXIMUMVALUEOFCPB_CNT, sizeof(unsigned char), GFP_KERNEL);
+		VDEC_ASSERT(h264_hrd_param_info->cbr_flag);
+		if (!h264_hrd_param_info->cbr_flag)
+			return BSPP_ERROR_OUT_OF_MEMORY;
+	}
+
+	for (sched_sel_idx = 0; sched_sel_idx <= h264_hrd_param_info->cpb_cnt_minus1;
+		sched_sel_idx++) {
+		h264_hrd_param_info->bit_rate_value_minus1[sched_sel_idx] =
+			swsr_read_unsigned_expgoulomb(swsr_context);
+		h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] =
+			swsr_read_unsigned_expgoulomb(swsr_context);
+
+		if (h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] == 0xffffffff)
+			/*
+			 * A 65-bit Exp-Golomb pattern (32 zeros, a 1, then
+			 * 32 zeros) decodes to 0xffffffff; the value should
+			 * be treated as 0.
+			 */
+			h264_hrd_param_info->cpb_size_value_minus1[sched_sel_idx] = 0;
+
+		h264_hrd_param_info->cbr_flag[sched_sel_idx] = swsr_read_bits(swsr_context, 1);
+	}
+
+	h264_hrd_param_info->initial_cpb_removal_delay_length_minus1 = swsr_read_bits(swsr_context,
+										      5);
+	h264_hrd_param_info->cpb_removal_delay_length_minus1 = swsr_read_bits(swsr_context, 5);
+	h264_hrd_param_info->dpb_output_delay_length_minus1 = swsr_read_bits(swsr_context, 5);
+	h264_hrd_param_info->time_offset_length = swsr_read_bits(swsr_context, 5);
+
+	return BSPP_ERROR_NONE;
+}
+
+/*
+ * @Function              bspp_h264_get_default_hrd_param
+ * @Description           Get default value of the HRD parameter
+ */
+static void bspp_h264_get_default_hrd_param(struct bspp_h264_hrdparam_info *h264_hrd_param_info)
+{
+	/* other parameters already set to '0' */
+	h264_hrd_param_info->initial_cpb_removal_delay_length_minus1 = 23;
+	h264_hrd_param_info->cpb_removal_delay_length_minus1        = 23;
+	h264_hrd_param_info->dpb_output_delay_length_minus1         = 23;
+	h264_hrd_param_info->time_offset_length                   = 24;
+}
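+
+/*
+ * These defaults match the values clause E.2.2 of the H.264 specification
+ * tells a decoder to infer when HRD parameters are absent: 24-bit delay
+ * fields (stored as minus1 = 23) and a 24-bit time_offset_length.
+ */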
+
+/*
+ * @Function              bspp_h264_vui_parser
+ * @Description           Parse the VUI info
+ */
+static enum bspp_error_type bspp_h264_vui_parser(void *swsr_context,
+						 struct bspp_h264_vui_info *vui_info,
+						 struct bspp_h264_sps_info *sps_info)
+{
+	enum bspp_error_type vui_parser_error = BSPP_ERROR_NONE;
+
+	vui_info->aspect_ratio_info_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->aspect_ratio_info_present_flag) {
+		vui_info->aspect_ratio_idc = swsr_read_bits(swsr_context, 8);
+		/* Extended SAR */
+		if (vui_info->aspect_ratio_idc == 255) {
+			vui_info->sar_width = swsr_read_bits(swsr_context, 16);
+			vui_info->sar_height = swsr_read_bits(swsr_context, 16);
+		} else if (vui_info->aspect_ratio_idc < 17) {
+			vui_info->sar_width = pixel_aspect[vui_info->aspect_ratio_idc][0];
+			vui_info->sar_height = pixel_aspect[vui_info->aspect_ratio_idc][1];
+		} else {
+			/* we can consider this error as an aux data error */
+			vui_parser_error |= BSPP_ERROR_INVALID_VALUE;
+		}
+	}
+
+	vui_info->overscan_info_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->overscan_info_present_flag)
+		vui_info->overscan_appropriate_flag = swsr_read_bits(swsr_context, 1);
+
+	vui_info->video_signal_type_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->video_signal_type_present_flag) {
+		vui_info->video_format = swsr_read_bits(swsr_context, 3);
+		vui_info->video_full_range_flag = swsr_read_bits(swsr_context, 1);
+		vui_info->colour_description_present_flag = swsr_read_bits(swsr_context, 1);
+		if (vui_info->colour_description_present_flag) {
+			vui_info->colour_primaries = swsr_read_bits(swsr_context, 8);
+			vui_info->transfer_characteristics = swsr_read_bits(swsr_context, 8);
+			vui_info->matrix_coefficients = swsr_read_bits(swsr_context, 8);
+		}
+	}
+
+	vui_info->chroma_location_info_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->chroma_location_info_present_flag) {
+		vui_info->chroma_sample_loc_type_top_field = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+		vui_info->chroma_sample_loc_type_bottom_field = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+	}
+
+	vui_info->timing_info_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->timing_info_present_flag) {
+		vui_info->num_units_in_tick = swsr_read_bits(swsr_context, 16);
+		vui_info->num_units_in_tick <<= 16;     /* SR can only do up to 31 bit reads */
+		vui_info->num_units_in_tick |= swsr_read_bits(swsr_context, 16);
+		vui_info->time_scale = swsr_read_bits(swsr_context, 16);
+		vui_info->time_scale <<= 16;     /* SR can only do up to 31 bit reads */
+		vui_info->time_scale |= swsr_read_bits(swsr_context, 16);
+		if (!vui_info->num_units_in_tick || !vui_info->time_scale)
+			vui_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+		vui_info->fixed_frame_rate_flag = swsr_read_bits(swsr_context, 1);
+	}
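+
+	/*
+	 * Worked example: num_units_in_tick = 1001 with time_scale = 60000
+	 * yields the familiar time_scale / (2 * num_units_in_tick)
+	 * ~= 29.97 frames per second for frame-coded content (see the
+	 * informative frame-rate derivation in the H.264 specification).
+	 */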
+
+	/* no default values */
+	vui_info->nal_hrd_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->nal_hrd_parameters_present_flag)
+		vui_parser_error |= bspp_h264_hrd_param_parser(swsr_context,
+				&vui_info->nal_hrd_parameters);
+	else
+		bspp_h264_get_default_hrd_param(&vui_info->nal_hrd_parameters);
+
+	vui_info->vcl_hrd_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+
+	if (vui_info->vcl_hrd_parameters_present_flag)
+		vui_parser_error |= bspp_h264_hrd_param_parser(swsr_context,
+				&vui_info->vcl_hrd_parameters);
+	else
+		bspp_h264_get_default_hrd_param(&vui_info->vcl_hrd_parameters);
+
+	if (vui_info->nal_hrd_parameters_present_flag || vui_info->vcl_hrd_parameters_present_flag)
+		vui_info->low_delay_hrd_flag = swsr_read_bits(swsr_context, 1);
+
+	vui_info->pic_struct_present_flag = swsr_read_bits(swsr_context, 1);
+	vui_info->bitstream_restriction_flag = swsr_read_bits(swsr_context, 1);
+	if (vui_info->bitstream_restriction_flag) {
+		vui_info->motion_vectors_over_pic_boundaries_flag = swsr_read_bits(swsr_context, 1);
+		vui_info->max_bytes_per_pic_denom = swsr_read_unsigned_expgoulomb(swsr_context);
+		vui_info->max_bits_per_mb_denom = swsr_read_unsigned_expgoulomb(swsr_context);
+		vui_info->log2_max_mv_length_horizontal =
+			swsr_read_unsigned_expgoulomb(swsr_context);
+		vui_info->log2_max_mv_length_vertical = swsr_read_unsigned_expgoulomb(swsr_context);
+		vui_info->num_reorder_frames = swsr_read_unsigned_expgoulomb(swsr_context);
+		vui_info->max_dec_frame_buffering = swsr_read_unsigned_expgoulomb(swsr_context);
+	}
+
+	if ((sps_info->profile_idc == h264_profile_baseline ||
+	     sps_info->profile_idc == h264_profile_extended) &&
+	    sps_info->max_num_ref_frames == 1) {
+		vui_info->bitstream_restriction_flag = 1;
+		vui_info->num_reorder_frames = 0;
+		vui_info->max_dec_frame_buffering = 1;
+	}
+
+	if (vui_info->num_reorder_frames > 32)
+		vui_parser_error |= BSPP_ERROR_UNSUPPORTED;
+
+	return vui_parser_error;
+}
+
+/*
+ * Parse scaling list
+ */
+static enum bspp_error_type bspp_h264_scl_listparser(void *swsr_context,
+						     unsigned char *scaling_list,
+						     unsigned char sizeof_scaling_list,
+						     unsigned char *usedefaultscalingmatrixflag)
+{
+	enum bspp_error_type parse_error = BSPP_ERROR_NONE;
+	int delta_scale;
+	unsigned int lastscale = 8;
+	unsigned int nextscale = 8;
+	unsigned int j;
+
+	VDEC_ASSERT(swsr_context);
+	VDEC_ASSERT(scaling_list);
+	VDEC_ASSERT(usedefaultscalingmatrixflag);
+
+	if (!scaling_list || !swsr_context || !usedefaultscalingmatrixflag) {
+		parse_error = BSPP_ERROR_UNRECOVERABLE;
+		return parse_error;
+	}
+
+	/* 7.3.2.1.1 */
+	for (j = 0; j < sizeof_scaling_list; j++) {
+		if (nextscale != 0) {
+			delta_scale = swsr_read_signed_expgoulomb(swsr_context);
+			if (delta_scale < -128 || delta_scale > 127)
+				parse_error |= BSPP_ERROR_INVALID_VALUE;
+			nextscale = (lastscale + delta_scale + 256) & 0xff;
+			*usedefaultscalingmatrixflag = (j == 0 && nextscale == 0);
+		}
+		scaling_list[j] = (nextscale == 0) ? lastscale : nextscale;
+		lastscale = scaling_list[j];
+	}
+	return parse_error;
+}
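+
+/*
+ * Worked example of the 7.3.2.1.1 recursion above: with the initial
+ * lastscale of 8, a first delta_scale of -2 gives
+ * nextscale = (8 - 2 + 256) & 0xff = 6, so scaling_list[0] = 6. A first
+ * delta_scale of -8 instead gives nextscale = 0 at j == 0, which sets
+ * usedefaultscalingmatrixflag and leaves the rest of the list at
+ * lastscale (8), signalling that the default matrix should be used.
+ */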
+
+/*
+ * Parse the SPS NAL unit
+ */
+static enum bspp_error_type bspp_h264_sps_parser(void *swsr_context,
+						 void *str_res,
+						 struct bspp_h264_seq_hdr_info *h264_seq_hdr_info)
+{
+	unsigned int i;
+	unsigned char scaling_list_num;
+	struct bspp_h264_sps_info *sps_info;
+	struct bspp_h264_vui_info *vui_info;
+	enum bspp_error_type sps_parser_error = BSPP_ERROR_NONE;
+	enum bspp_error_type vui_parser_error = BSPP_ERROR_NONE;
+
+	sps_info = &h264_seq_hdr_info->sps_info;
+	vui_info = &h264_seq_hdr_info->vui_info;
+
+	/* Always set the default VUI/MVCExt; their values
+	 * may be used even if VUI/MVCExt are not present
+	 */
+	bspp_h264_set_default_vui(vui_info);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("Parsing Sequence Parameter Set");
+#endif
+	sps_info->profile_idc = swsr_read_bits(swsr_context, 8);
+	if (sps_info->profile_idc != H264_PROFILE_BASELINE &&
+	    sps_info->profile_idc != H264_PROFILE_MAIN &&
+	    sps_info->profile_idc != H264_PROFILE_SCALABLE &&
+	    sps_info->profile_idc != H264_PROFILE_EXTENDED &&
+	    sps_info->profile_idc != H264_PROFILE_HIGH &&
+	    sps_info->profile_idc != H264_PROFILE_HIGH10 &&
+	    sps_info->profile_idc != H264_PROFILE_MVC_HIGH &&
+	    sps_info->profile_idc != H264_PROFILE_HIGH422 &&
+	    sps_info->profile_idc != H264_PROFILE_CAVLC444 &&
+	    sps_info->profile_idc != H264_PROFILE_MVC_STEREO &&
+	    sps_info->profile_idc != H264_PROFILE_HIGH444) {
+		pr_err("Invalid Profile ID [%d],Parsed by BSPP", sps_info->profile_idc);
+		return BSPP_ERROR_UNSUPPORTED;
+	}
+	sps_info->constraint_set_flags = swsr_read_bits(swsr_context, 8);
+	sps_info->level_idc = swsr_read_bits(swsr_context, 8);
+
+	/* sequence parameter set id */
+	sps_info->seq_parameter_set_id = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (sps_info->seq_parameter_set_id >= MAX_SPS_COUNT) {
+		pr_err("SPS ID [%d] goes beyond the limit", sps_info->seq_parameter_set_id);
+		return BSPP_ERROR_UNSUPPORTED;
+	}
+
+	/* High profile settings */
+	if (sps_info->profile_idc == H264_PROFILE_HIGH ||
+	    sps_info->profile_idc == H264_PROFILE_HIGH10 ||
+	    sps_info->profile_idc == H264_PROFILE_HIGH422 ||
+	    sps_info->profile_idc == H264_PROFILE_HIGH444 ||
+	    sps_info->profile_idc == H264_PROFILE_CAVLC444 ||
+	    sps_info->profile_idc == H264_PROFILE_MVC_HIGH ||
+	    sps_info->profile_idc == H264_PROFILE_MVC_STEREO) {
+#ifdef DEBUG_DECODER_DRIVER
+		pr_info("This is High Profile Bitstream");
+#endif
+		sps_info->chroma_format_idc = swsr_read_unsigned_expgoulomb(swsr_context);
+		if (sps_info->chroma_format_idc > 3) {
+			pr_err("chroma_format_idc[%d] is not within the range",
+			       sps_info->chroma_format_idc);
+			sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+		}
+		if (sps_info->chroma_format_idc == 3)
+			sps_info->separate_colour_plane_flag = swsr_read_bits(swsr_context, 1);
+		else
+			sps_info->separate_colour_plane_flag = 0;
+
+		sps_info->bit_depth_luma_minus8 = swsr_read_unsigned_expgoulomb(swsr_context);
+		if (sps_info->bit_depth_luma_minus8 > 6)
+			sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+		sps_info->bit_depth_chroma_minus8 = swsr_read_unsigned_expgoulomb(swsr_context);
+		if (sps_info->bit_depth_chroma_minus8 > 6)
+			sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+
+		sps_info->qpprime_y_zero_transform_bypass_flag = swsr_read_bits(swsr_context, 1);
+		sps_info->seq_scaling_matrix_present_flag = swsr_read_bits(swsr_context, 1);
+		if (sps_info->seq_scaling_matrix_present_flag) {
+#ifdef DEBUG_DECODER_DRIVER
+			pr_info("seq_scaling_matrix_present_flag is available");
+#endif
+			scaling_list_num = (sps_info->chroma_format_idc != 3) ? 8 : 12;
+
+			if (!sps_info->scllst4x4seq) {
+				sps_info->scllst4x4seq =
+					kzalloc(sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+						       [H264FW_4X4_SIZE]), GFP_KERNEL);
+				if (!sps_info->scllst4x4seq)
+					sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+			}
+			if (!sps_info->scllst8x8seq) {
+				sps_info->scllst8x8seq =
+					kzalloc(sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+						       [H264FW_8X8_SIZE]), GFP_KERNEL);
+				if (!sps_info->scllst8x8seq)
+					sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+			}
+
+		{
+			unsigned char(*scllst4x4seq)[H264FW_NUM_4X4_LISTS]
+				[H264FW_4X4_SIZE] =
+			(unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+						sps_info->scllst4x4seq;
+			unsigned char(*scllst8x8seq)[H264FW_NUM_8X8_LISTS]
+				[H264FW_8X8_SIZE] =
+				(unsigned char (*)[H264FW_NUM_8X8_LISTS]
+				 [H264FW_8X8_SIZE])
+				sps_info->scllst8x8seq;
+
+			for (i = 0; i < scaling_list_num; i++) {
+				unsigned char *ptr =
+					&sps_info->usedefaultscalingmatrixflag_seq[i];
+
+				sps_info->seq_scaling_list_present_flag[i] =
+							swsr_read_bits(swsr_context, 1);
+				if (sps_info->seq_scaling_list_present_flag[i]) {
+					if (i < 6) {
+						sps_parser_error |=
+							bspp_h264_scl_listparser
+							(swsr_context,
+							 (*scllst4x4seq)[i], 16,
+							 ptr);
+					} else {
+						sps_parser_error |=
+							bspp_h264_scl_listparser
+							(swsr_context,
+							 (*scllst8x8seq)[i - 6], 64,
+							 ptr);
+					}
+				}
+			}
+		}
+		}
+	} else {
+		/* default values in here */
+		sps_info->chroma_format_idc = 1;
+		sps_info->bit_depth_luma_minus8 = 0;
+		sps_info->bit_depth_chroma_minus8 = 0;
+		sps_info->qpprime_y_zero_transform_bypass_flag = 0;
+		sps_info->seq_scaling_matrix_present_flag = 0;
+	}
+
+	sps_info->log2_max_frame_num_minus4 = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (sps_info->log2_max_frame_num_minus4 > 12) {
+		pr_err("log2_max_frame_num_minus4[%d] is not within range [0 - 12]",
+		       sps_info->log2_max_frame_num_minus4);
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+
+	sps_info->pic_order_cnt_type = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (sps_info->pic_order_cnt_type > 2) {
+		pr_err("pic_order_cnt_type[%d] is not within range [0 - 2]",
+		       sps_info->pic_order_cnt_type);
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+
+	if (sps_info->pic_order_cnt_type == 0) {
+		sps_info->log2_max_pic_order_cnt_lsb_minus4 = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+		if (sps_info->log2_max_pic_order_cnt_lsb_minus4 > 12) {
+			pr_err("log2_max_pic_order_cnt_lsb_minus4[%d] is not within range [0 - 12]",
+			       sps_info->log2_max_pic_order_cnt_lsb_minus4);
+			sps_info->log2_max_pic_order_cnt_lsb_minus4 = 12;
+			sps_parser_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+		}
+	} else if (sps_info->pic_order_cnt_type == 1) {
+		sps_info->delta_pic_order_always_zero_flag = swsr_read_bits(swsr_context, 1);
+		sps_info->offset_for_non_ref_pic = swsr_read_signed_expgoulomb(swsr_context);
+		sps_info->offset_for_top_to_bottom_field = swsr_read_signed_expgoulomb
+										(swsr_context);
+		sps_info->num_ref_frames_in_pic_order_cnt_cycle = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+		if (sps_info->num_ref_frames_in_pic_order_cnt_cycle > 255) {
+			pr_err("num_ref_frames_in_pic_order_cnt_cycle[%d] is not within range [0 - 255]",
+			       sps_info->num_ref_frames_in_pic_order_cnt_cycle);
+			sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+		}
+
+		if (!sps_info->offset_for_ref_frame) {
+			sps_info->offset_for_ref_frame =
+				kmalloc((H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int)),
+					GFP_KERNEL);
+			if (!sps_info->offset_for_ref_frame) {
+				pr_err("out of memory");
+				sps_parser_error |= BSPP_ERROR_OUT_OF_MEMORY;
+			}
+		}
+
+		if (sps_info->offset_for_ref_frame) {
+			VDEC_ASSERT(sps_info->num_ref_frames_in_pic_order_cnt_cycle <=
+				    H264FW_MAX_CYCLE_REF_FRAMES);
+			memset(sps_info->offset_for_ref_frame, 0x00,
+			       (H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int)));
+			for (i = 0; i < sps_info->num_ref_frames_in_pic_order_cnt_cycle; i++) {
+				int offset = swsr_read_signed_expgoulomb(swsr_context);
+
+				/* Keep parsing but drop entries beyond the array bound */
+				if (i < H264FW_MAX_CYCLE_REF_FRAMES)
+					sps_info->offset_for_ref_frame[i] = offset;
+			}
+		}
+	} else if (sps_info->pic_order_cnt_type != 2) {
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+	sps_info->max_num_ref_frames = swsr_read_unsigned_expgoulomb(swsr_context);
+
+	if (sps_info->max_num_ref_frames > 16) {
+		pr_err("num_ref_frames[%d] is not within range [0 - 16]",
+		       sps_info->max_num_ref_frames);
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+	sps_info->gaps_in_frame_num_value_allowed_flag = swsr_read_bits(swsr_context, 1);
+	sps_info->pic_width_in_mbs_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (sps_info->pic_width_in_mbs_minus1 >= MAX_WIDTH_IN_MBS) {
+		pr_err("pic_width_in_mbs_minus1[%d] is not within range",
+		       sps_info->pic_width_in_mbs_minus1);
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+	sps_info->pic_height_in_map_units_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (sps_info->pic_height_in_map_units_minus1 >= MAX_HEIGHT_IN_MBS) {
+		pr_err("pic_height_in_map_units_minus1[%d] is not within range",
+		       sps_info->pic_height_in_map_units_minus1);
+		sps_parser_error |= BSPP_ERROR_INVALID_VALUE;
+	}
+
+	sps_info->frame_mbs_only_flag = swsr_read_bits(swsr_context, 1);
+	if (!sps_info->frame_mbs_only_flag)
+		sps_info->mb_adaptive_frame_field_flag = swsr_read_bits(swsr_context, 1);
+	else
+		sps_info->mb_adaptive_frame_field_flag = 0;
+
+	sps_info->direct_8x8_inference_flag = swsr_read_bits(swsr_context, 1);
+
+	sps_info->frame_cropping_flag = swsr_read_bits(swsr_context, 1);
+	if (sps_info->frame_cropping_flag) {
+		sps_info->frame_crop_left_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+		sps_info->frame_crop_right_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+		sps_info->frame_crop_top_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+		sps_info->frame_crop_bottom_offset = swsr_read_unsigned_expgoulomb(swsr_context);
+	} else {
+		sps_info->frame_crop_left_offset = 0;
+		sps_info->frame_crop_right_offset = 0;
+		sps_info->frame_crop_top_offset = 0;
+		sps_info->frame_crop_bottom_offset = 0;
+	}
+
+	sps_info->vui_parameters_present_flag = swsr_read_bits(swsr_context, 1);
+	/* initialise matrix_coefficients to 2 (unspecified) */
+	vui_info->matrix_coefficients = 2;
+
+	if (sps_info->vui_parameters_present_flag) {
+#ifdef DEBUG_DECODER_DRIVER
+		pr_info("vui_parameters_present_flag is available");
+#endif
+		/* save the SPS parse error in temp variable */
+		vui_parser_error = bspp_h264_vui_parser(swsr_context, vui_info, sps_info);
+		if (vui_parser_error != BSPP_ERROR_NONE)
+			sps_parser_error  |= BSPP_ERROR_AUXDATA;
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+		vui_info->max_dec_frame_buffering = 1;
+		vui_info->num_reorder_frames = 0;
+#endif
+	}
+
+	if (sps_info->profile_idc == H264_PROFILE_MVC_HIGH ||
+	    sps_info->profile_idc == H264_PROFILE_MVC_STEREO) {
+		pr_err("No MVC Support for this version\n");
+	}
+
+	if (swsr_check_exception(swsr_context) != SWSR_EXCEPT_NO_EXCEPTION)
+		sps_parser_error |= BSPP_ERROR_INSUFFICIENT_DATA;
+
+	return sps_parser_error;
+}
+
+/*
+ * Parse the PPS NAL unit
+ */
+static enum bspp_error_type bspp_h264_pps_parser(void *swsr_context,
+						 void *str_res,
+						 struct bspp_h264_pps_info *h264_pps_info)
+{
+	int i, group, chroma_format_idc;
+	unsigned int number_bits_per_slicegroup_id;
+	unsigned char n_scaling_list;
+	unsigned char more_rbsp_data;
+	unsigned int result;
+	enum bspp_error_type pps_parse_error = BSPP_ERROR_NONE;
+
+	VDEC_ASSERT(swsr_context);
+
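+	/*
+	 * Illustrative note: swsr_read_unsigned_expgoulomb() decodes one
+	 * ue(v) Exp-Golomb value; e.g. the bit strings "1", "010" and
+	 * "00101" decode to 0, 1 and 4 respectively.
+	 */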
+	h264_pps_info->pps_id = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (h264_pps_info->pps_id >= MAX_PPS_COUNT) {
+		pr_err("Picture Parameter Set(PPS) ID is not within the range");
+		h264_pps_info->pps_id = (int)BSPP_INVALID;
+		return BSPP_ERROR_UNSUPPORTED;
+	}
+	h264_pps_info->seq_parameter_set_id = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (h264_pps_info->seq_parameter_set_id >= MAX_SPS_COUNT) {
+		pr_err("Sequence Parameter Set(SPS) ID is not within the range");
+		h264_pps_info->seq_parameter_set_id = (int)BSPP_INVALID;
+		return BSPP_ERROR_UNSUPPORTED;
+	}
+
+	{
+		/*
+		 * Get the chroma_format_idc from sps. Because of MVC sharing sps and subset sps ids
+		 * (H.7.4.1.2.1).
+		 * At this point is not clear if this pps refers to an sps or a subset sps.
+		 * It should be fine, however, for the case of chroma_format_idc to try and locate
+		 * a subset sps if there isn't a normal one.
+		 */
+		struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+		struct bspp_sequence_hdr_info *seq_hdr_info;
+
+		seq_hdr_info = bspp_get_sequ_hdr(str_res, h264_pps_info->seq_parameter_set_id);
+
+		if (!seq_hdr_info) {
+			seq_hdr_info = bspp_get_sequ_hdr(str_res,
+							 h264_pps_info->seq_parameter_set_id + 32);
+			if (!seq_hdr_info)
+				return BSPP_ERROR_NO_SEQUENCE_HDR;
+		}
+
+		h264_seq_hdr_info =
+			(struct bspp_h264_seq_hdr_info *)seq_hdr_info->secure_sequence_info;
+
+		chroma_format_idc = h264_seq_hdr_info->sps_info.chroma_format_idc;
+	}
+
+	h264_pps_info->entropy_coding_mode_flag = swsr_read_bits(swsr_context, 1);
+	h264_pps_info->pic_order_present_flag = swsr_read_bits(swsr_context, 1);
+	h264_pps_info->num_slice_groups_minus1 = swsr_read_unsigned_expgoulomb(swsr_context);
+	if ((h264_pps_info->num_slice_groups_minus1 + 1) >
+		MAX_SLICEGROUP_COUNT) {
+		h264_pps_info->num_slice_groups_minus1 =
+			MAX_SLICEGROUP_COUNT - 1;
+		pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+	}
+
+	if (h264_pps_info->num_slice_groups_minus1 > 0) {
+		h264_pps_info->slice_group_map_type = swsr_read_unsigned_expgoulomb(swsr_context);
+		pr_err("slice_group_map_type is : %d, Parsed by BSPP",
+		       h264_pps_info->slice_group_map_type);
+		if (h264_pps_info->slice_group_map_type > 6) {
+			pr_err("slice_group_map_type [%d] is not within the range [ 0- 6 ]",
+			       h264_pps_info->slice_group_map_type);
+			       pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+		}
+
+		if (h264_pps_info->slice_group_map_type == 0) {
+			for (group = 0; group <= h264_pps_info->num_slice_groups_minus1; group++) {
+				h264_pps_info->run_length_minus1[group] =
+					swsr_read_unsigned_expgoulomb(swsr_context);
+			}
+		} else if (h264_pps_info->slice_group_map_type == 2) {
+			for (group = 0; group < h264_pps_info->num_slice_groups_minus1; group++) {
+				h264_pps_info->top_left[group] = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+				h264_pps_info->bottom_right[group] =
+					swsr_read_unsigned_expgoulomb(swsr_context);
+			}
+		} else if (h264_pps_info->slice_group_map_type == 3 ||
+			h264_pps_info->slice_group_map_type == 4 ||
+			h264_pps_info->slice_group_map_type == 5) {
+			h264_pps_info->slice_group_change_direction_flag = swsr_read_bits
+									(swsr_context, 1);
+			h264_pps_info->slice_group_change_rate_minus1 =
+				swsr_read_unsigned_expgoulomb(swsr_context);
+		} else if (h264_pps_info->slice_group_map_type == 6) {
+			h264_pps_info->pic_size_in_map_unit = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+			if (h264_pps_info->pic_size_in_map_unit >= H264_MAX_SGM_SIZE) {
+				pr_err("pic_size_in_map_units_minus1 [%d] is not within the range",
+				       h264_pps_info->pic_size_in_map_unit);
+				pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+			}
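+			/*
+			 * Each slice_group_id is coded in
+			 * ceil(log2(num_slice_groups_minus1 + 1)) bits; for
+			 * instance, num_slice_groups_minus1 = 2 (three slice
+			 * groups) gives ceil(log2(3)) = 2 bits per id.
+			 */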
+			number_bits_per_slicegroup_id = h264ceillog2
+				(h264_pps_info->num_slice_groups_minus1 + 1);
+
+			if ((h264_pps_info->pic_size_in_map_unit + 1) >
+				h264_pps_info->h264_ppssgm_info.slicegroupidnum) {
+				unsigned char *slice_group_id =
+					kmalloc(((h264_pps_info->pic_size_in_map_unit + 1) *
+						sizeof(unsigned char)),
+						GFP_KERNEL);
+				if (!slice_group_id) {
+					pr_err("out of memory");
+					pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+				} else {
+					pr_err("reallocating SGM info from size %lu bytes to size %lu bytes",
+					       h264_pps_info->h264_ppssgm_info.slicegroupidnum *
+					       sizeof(unsigned char),
+					       (h264_pps_info->pic_size_in_map_unit + 1) *
+					       sizeof(unsigned char));
+					if (h264_pps_info->h264_ppssgm_info.slice_group_id) {
+						memcpy
+						(slice_group_id,
+						 h264_pps_info->h264_ppssgm_info.slice_group_id,
+						 h264_pps_info->h264_ppssgm_info.slicegroupidnum *
+						 sizeof(unsigned char));
+						kfree
+						(h264_pps_info->h264_ppssgm_info.slice_group_id);
+					}
+					h264_pps_info->h264_ppssgm_info.slicegroupidnum =
+						(h264_pps_info->pic_size_in_map_unit + 1);
+					h264_pps_info->h264_ppssgm_info.slice_group_id =
+						slice_group_id;
+				}
+			}
+
+			VDEC_ASSERT((h264_pps_info->pic_size_in_map_unit + 1) <=
+				h264_pps_info->h264_ppssgm_info.slicegroupidnum);
+			for (i = 0; i <= h264_pps_info->pic_size_in_map_unit; i++)
+				h264_pps_info->h264_ppssgm_info.slice_group_id[i] =
+					swsr_read_bits(swsr_context, number_bits_per_slicegroup_id);
+		}
+	}
+
+	for (i = 0; i < H264FW_MAX_REFPIC_LISTS; i++) {
+		h264_pps_info->num_ref_idx_lx_active_minus1[i] = swsr_read_unsigned_expgoulomb
+										(swsr_context);
+		if (h264_pps_info->num_ref_idx_lx_active_minus1[i] >=
+			SL_MAX_REF_IDX) {
+			pr_err("num_ref_idx_lx_active_minus1[%d] [%d] is not within the range",
+			       i, h264_pps_info->num_ref_idx_lx_active_minus1[i]);
+			pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+		}
+	}
+
+	h264_pps_info->weighted_pred_flag = swsr_read_bits(swsr_context, 1);
+	h264_pps_info->weighted_bipred_idc = swsr_read_bits(swsr_context, 2);
+	h264_pps_info->pic_init_qp_minus26 = swsr_read_signed_expgoulomb(swsr_context);
+	if (h264_pps_info->pic_init_qp_minus26 > 25)
+		pr_err("pic_init_qp_minus26[%d] is not within the range [-26 , 25]",
+		       h264_pps_info->pic_init_qp_minus26);
+
+	h264_pps_info->pic_init_qs_minus26 = swsr_read_signed_expgoulomb(swsr_context);
+	if (h264_pps_info->pic_init_qs_minus26 > 25)
+		pr_err("pic_init_qs_minus26[%d] is not within the range [-26 , 25]",
+		       h264_pps_info->pic_init_qs_minus26);
+
+	h264_pps_info->chroma_qp_index_offset = swsr_read_signed_expgoulomb(swsr_context);
+	if (h264_pps_info->chroma_qp_index_offset > H264_MAX_CHROMA_QP_INDEX_OFFSET)
+		h264_pps_info->chroma_qp_index_offset = H264_MAX_CHROMA_QP_INDEX_OFFSET;
+
+	else if (h264_pps_info->chroma_qp_index_offset < H264_MIN_CHROMA_QP_INDEX_OFFSET)
+		h264_pps_info->chroma_qp_index_offset = H264_MIN_CHROMA_QP_INDEX_OFFSET;
+
+	h264_pps_info->deblocking_filter_control_present_flag = swsr_read_bits(swsr_context, 1);
+	h264_pps_info->constrained_intra_pred_flag = swsr_read_bits(swsr_context, 1);
+	h264_pps_info->redundant_pic_cnt_present_flag = swsr_read_bits(swsr_context, 1);
+
+	/* Check for more rbsp data. */
+	result = swsr_check_more_rbsp_data(swsr_context, &more_rbsp_data);
+	if (result == 0 && more_rbsp_data) {
+#ifdef DEBUG_DECODER_DRIVER
+		pr_info("More RBSP data is available");
+#endif
+		/* Fidelity Range Extensions Stuff */
+		h264_pps_info->transform_8x8_mode_flag = swsr_read_bits(swsr_context, 1);
+		h264_pps_info->pic_scaling_matrix_present_flag = swsr_read_bits(swsr_context, 1);
+		if (h264_pps_info->pic_scaling_matrix_present_flag) {
+			if (!h264_pps_info->scllst4x4pic) {
+				h264_pps_info->scllst4x4pic =
+					kmalloc((sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+								[H264FW_4X4_SIZE])), GFP_KERNEL);
+				if (!h264_pps_info->scllst4x4pic) {
+					pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+				} else {
+					VDEC_ASSERT(h264_pps_info->scllst4x4pic);
+					memset(h264_pps_info->scllst4x4pic, 0x00,
+					       sizeof(unsigned char[H264FW_NUM_4X4_LISTS]
+					       [H264FW_4X4_SIZE]));
+				}
+			}
+			if (!h264_pps_info->scllst8x8pic) {
+				h264_pps_info->scllst8x8pic =
+					kmalloc((sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+								[H264FW_8X8_SIZE])), GFP_KERNEL);
+				if (!h264_pps_info->scllst8x8pic) {
+					pps_parse_error |= BSPP_ERROR_OUT_OF_MEMORY;
+				} else {
+					VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+					memset(h264_pps_info->scllst8x8pic, 0x00,
+					       sizeof(unsigned char[H264FW_NUM_8X8_LISTS]
+					       [H264FW_8X8_SIZE]));
+				}
+			}
+		{
+			unsigned char(*scllst4x4pic)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+					(unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+					h264_pps_info->scllst4x4pic;
+			unsigned char(*scllst8x8pic)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+					(unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])
+					h264_pps_info->scllst8x8pic;
+
+			/*
+			 * Total number of lists: 6 when the 8x8 transform is
+			 * disabled; with transform_8x8_mode_flag set it is 12
+			 * for chroma_format_idc == 3 (YUV444) and 8 otherwise.
+			 */
+			n_scaling_list = 6 + (chroma_format_idc != 3 ? 2 : 6) *
+				h264_pps_info->transform_8x8_mode_flag;
+			if (n_scaling_list > 12)
+				pps_parse_error |= BSPP_ERROR_UNRECOVERABLE;
+
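+			/*
+			 * Worked example: 4:2:0 (chroma_format_idc == 1) with
+			 * transform_8x8_mode_flag set gives 6 + 2 * 1 = 8
+			 * lists (six 4x4 plus two 8x8), while 4:4:4 with the
+			 * 8x8 transform gives 6 + 6 = 12.
+			 */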
+			VDEC_ASSERT(h264_pps_info->scllst4x4pic);
+			VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+			for (i = 0; i < n_scaling_list; i++) {
+				unsigned char *ptr =
+					&h264_pps_info->usedefaultscalingmatrixflag_pic[i];
+
+				h264_pps_info->pic_scaling_list_present_flag[i] =
+					swsr_read_bits(swsr_context, 1);
+				if (h264_pps_info->pic_scaling_list_present_flag[i]) {
+					if (i < 6)
+						pps_parse_error |=
+							bspp_h264_scl_listparser
+							(swsr_context,
+							 (*scllst4x4pic)[i], 16, ptr);
+					else
+						pps_parse_error |=
+							bspp_h264_scl_listparser
+							(swsr_context,
+							(*scllst8x8pic)[i - 6], 64, ptr);
+				}
+			}
+		}
+		}
+		h264_pps_info->second_chroma_qp_index_offset = swsr_read_signed_expgoulomb
+										(swsr_context);
+
+		if (h264_pps_info->second_chroma_qp_index_offset > H264_MAX_CHROMA_QP_INDEX_OFFSET)
+			h264_pps_info->second_chroma_qp_index_offset =
+				H264_MAX_CHROMA_QP_INDEX_OFFSET;
+		else if (h264_pps_info->second_chroma_qp_index_offset <
+			H264_MIN_CHROMA_QP_INDEX_OFFSET)
+			h264_pps_info->second_chroma_qp_index_offset =
+				H264_MIN_CHROMA_QP_INDEX_OFFSET;
+	} else {
+		h264_pps_info->second_chroma_qp_index_offset =
+			h264_pps_info->chroma_qp_index_offset;
+	}
+
+	if (swsr_check_exception(swsr_context) != SWSR_EXCEPT_NO_EXCEPTION)
+		pps_parse_error |= BSPP_ERROR_INSUFFICIENT_DATA;
+
+	return pps_parse_error;
+}
+
+static int bspp_h264_release_sequ_hdr_info(void *str_alloc, void *secure_sps_info)
+{
+	struct bspp_h264_seq_hdr_info *h264_seq_hdr_info =
+					(struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+	if (!h264_seq_hdr_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	return 0;
+}
+
+static int bspp_h264_reset_seq_hdr_info(void *secure_sps_info)
+{
+	struct bspp_h264_seq_hdr_info *h264_seq_hdr_info = NULL;
+	unsigned int *nal_hrd_bitrate_valueminus1 = NULL;
+	unsigned int *vcl_hrd_bitrate_valueminus1 = NULL;
+	unsigned int *nal_hrd_cpbsize_valueminus1 = NULL;
+	unsigned int *vcl_hrd_cpbsize_valueminus1 = NULL;
+	unsigned char *nal_hrd_cbrflag = NULL;
+	unsigned char *vcl_hrd_cbrflag = NULL;
+	unsigned int *offset_for_ref_frame = NULL;
+	unsigned char *scllst4x4seq = NULL;
+	unsigned char *scllst8x8seq = NULL;
+
+	if (!secure_sps_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+	offset_for_ref_frame = h264_seq_hdr_info->sps_info.offset_for_ref_frame;
+	scllst4x4seq = h264_seq_hdr_info->sps_info.scllst4x4seq;
+	scllst8x8seq = h264_seq_hdr_info->sps_info.scllst8x8seq;
+	nal_hrd_bitrate_valueminus1 =
+		h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1;
+	vcl_hrd_bitrate_valueminus1 =
+		h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1;
+	nal_hrd_cpbsize_valueminus1 =
+		h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1;
+	vcl_hrd_cpbsize_valueminus1 =
+		h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1;
+	nal_hrd_cbrflag = h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag;
+	vcl_hrd_cbrflag = h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag;
+
+	/* Cleaning vui_info */
+	if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1)
+		memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1,
+		       0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1)
+		memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1,
+		       0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1)
+		memset(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1,
+		       0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned int));
+
+	if (h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag)
+		memset(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag,
+		       0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+	if (h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag)
+		memset(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag,
+		       0x00, VDEC_H264_MAXIMUMVALUEOFCPB_CNT * sizeof(unsigned char));
+
+	/* Cleaning sps_info */
+	if (h264_seq_hdr_info->sps_info.offset_for_ref_frame)
+		memset(h264_seq_hdr_info->sps_info.offset_for_ref_frame, 0x00,
+		       H264FW_MAX_CYCLE_REF_FRAMES * sizeof(unsigned int));
+
+	if (h264_seq_hdr_info->sps_info.scllst4x4seq)
+		memset(h264_seq_hdr_info->sps_info.scllst4x4seq, 0x00,
+		       sizeof(unsigned char[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]));
+
+	if (h264_seq_hdr_info->sps_info.scllst8x8seq)
+		memset(h264_seq_hdr_info->sps_info.scllst8x8seq, 0x00,
+		       sizeof(unsigned char[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]));
+
+	/* Erasing the structure */
+	memset(h264_seq_hdr_info, 0, sizeof(*h264_seq_hdr_info));
+
+	/* Restoring pointers */
+	h264_seq_hdr_info->sps_info.offset_for_ref_frame = offset_for_ref_frame;
+	h264_seq_hdr_info->sps_info.scllst4x4seq = scllst4x4seq;
+	h264_seq_hdr_info->sps_info.scllst8x8seq = scllst8x8seq;
+
+	h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1 =
+		nal_hrd_bitrate_valueminus1;
+	h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1 =
+		vcl_hrd_bitrate_valueminus1;
+
+	h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1 =
+		nal_hrd_cpbsize_valueminus1;
+	h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1 =
+		vcl_hrd_cpbsize_valueminus1;
+
+	h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag = nal_hrd_cbrflag;
+	h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag = vcl_hrd_cbrflag;
+
+	return 0;
+}
+
+static int bspp_h264_reset_pps_info(void *secure_pps_info)
+{
+	struct bspp_h264_pps_info *h264_pps_info = NULL;
+	unsigned short slicegroupidnum = 0;
+	unsigned char *slice_group_id = NULL;
+	unsigned char *scllst4x4pic = NULL;
+	unsigned char *scllst8x8pic = NULL;
+
+	if (!secure_pps_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	h264_pps_info = (struct bspp_h264_pps_info *)secure_pps_info;
+
+	/*
+	 * Storing temp values (we want to keep the SGM structure;
+	 * it may be useful again instead of reallocating it later)
+	 */
+	slice_group_id = h264_pps_info->h264_ppssgm_info.slice_group_id;
+	slicegroupidnum = h264_pps_info->h264_ppssgm_info.slicegroupidnum;
+	scllst4x4pic = h264_pps_info->scllst4x4pic;
+	scllst8x8pic = h264_pps_info->scllst8x8pic;
+
+	if (h264_pps_info->h264_ppssgm_info.slice_group_id)
+		memset(h264_pps_info->h264_ppssgm_info.slice_group_id, 0x00,
+		       h264_pps_info->h264_ppssgm_info.slicegroupidnum * sizeof(unsigned char));
+
+	if (h264_pps_info->scllst4x4pic)
+		memset(h264_pps_info->scllst4x4pic, 0x00,
+		       sizeof(unsigned char[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE]));
+
+	if (h264_pps_info->scllst8x8pic)
+		memset(h264_pps_info->scllst8x8pic, 0x00,
+		       sizeof(unsigned char[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE]));
+
+	/* Erasing the structure */
+	memset(h264_pps_info, 0x00, sizeof(*h264_pps_info));
+
+	/* Copy the temp variable back */
+	h264_pps_info->h264_ppssgm_info.slicegroupidnum = slicegroupidnum;
+	h264_pps_info->h264_ppssgm_info.slice_group_id = slice_group_id;
+	h264_pps_info->scllst4x4pic = scllst4x4pic;
+	h264_pps_info->scllst8x8pic = scllst8x8pic;
+
+	return 0;
+}
+
+static enum bspp_error_type bspp_h264_pict_hdr_parser
+					(void *swsr_context, void *str_res,
+					 struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+					 struct bspp_pps_info **pps_info,
+					 struct bspp_sequence_hdr_info **seq_hdr_info,
+					 enum h264_nalunittype nal_unit_type,
+					 unsigned char nal_ref_idc)
+{
+	enum bspp_error_type slice_parse_error = BSPP_ERROR_NONE;
+	struct bspp_h264_pps_info *h264_pps_info;
+	struct bspp_pps_info *pps_info_loc;
+	struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+	struct bspp_sequence_hdr_info *seq_hdr_info_loc;
+	int id_loc;
+
+	VDEC_ASSERT(swsr_context);
+
+	memset(h264_slice_hdr_info, 0, sizeof(*h264_slice_hdr_info));
+
+	h264_slice_hdr_info->first_mb_in_slice = swsr_read_unsigned_expgoulomb(swsr_context);
+	h264_slice_hdr_info->slice_type = (enum bspp_h264_slice_type)swsr_read_unsigned_expgoulomb
+										(swsr_context);
+	if ((unsigned int)h264_slice_hdr_info->slice_type > 9) {
+		pr_err("Slice Type [%d] invalid, set to P", h264_slice_hdr_info->slice_type);
+		h264_slice_hdr_info->slice_type = (enum bspp_h264_slice_type)0;
+		slice_parse_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+	}
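+	/*
+	 * slice_type values 5-9 signal that every slice in the picture has
+	 * the same type as the corresponding 0-4 value, so folding modulo 5
+	 * maps them onto P, B, I, SP and SI respectively.
+	 */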
+	h264_slice_hdr_info->slice_type =
+		(enum bspp_h264_slice_type)(h264_slice_hdr_info->slice_type % 5);
+
+	h264_slice_hdr_info->pps_id = swsr_read_unsigned_expgoulomb(swsr_context);
+	if (h264_slice_hdr_info->pps_id >= MAX_PPS_COUNT) {
+		pr_err("Picture Parameter ID [%d] invalid, set to 0", h264_slice_hdr_info->pps_id);
+		h264_slice_hdr_info->pps_id = 0;
+		slice_parse_error |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+	}
+
+	/* Set relevant PPS and SPS */
+	pps_info_loc = bspp_get_pps_hdr(str_res, h264_slice_hdr_info->pps_id);
+
+	if (!pps_info_loc) {
+		slice_parse_error |= BSPP_ERROR_NO_PPS;
+		goto error;
+	}
+	h264_pps_info = (struct bspp_h264_pps_info *)pps_info_loc->secure_pps_info;
+	if (!h264_pps_info) {
+		slice_parse_error |= BSPP_ERROR_NO_PPS;
+		goto error;
+	}
+	VDEC_ASSERT(h264_pps_info->pps_id == h264_slice_hdr_info->pps_id);
+	*pps_info = pps_info_loc;
+
+	/* seq_parameter_set_id is always in range 0-31,
+	 * so we can add an offset to indicate a subset sequence header
+	 */
+	id_loc = h264_pps_info->seq_parameter_set_id;
+	id_loc = (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+		nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+		nal_unit_type == H264_NALTYPE_SUBSET_SPS) ? id_loc + 32 : id_loc;
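+	/*
+	 * Example: a subset SPS with seq_parameter_set_id 5 is looked up in
+	 * slot 5 + 32 = 37, keeping slots 0-31 free for plain SPS entries.
+	 */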
+
+	seq_hdr_info_loc = bspp_get_sequ_hdr(str_res, id_loc);
+
+	if (!seq_hdr_info_loc) {
+		slice_parse_error |= BSPP_ERROR_NO_SEQUENCE_HDR;
+		goto error;
+	}
+	h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)seq_hdr_info_loc->secure_sequence_info;
+	VDEC_ASSERT((unsigned int)h264_seq_hdr_info->sps_info.seq_parameter_set_id ==
+		h264_pps_info->seq_parameter_set_id);
+	*seq_hdr_info = seq_hdr_info_loc;
+
+	/*
+	 * For MINIMAL parsing in secure mode, slice header parsing can stop
+	 * here, though that may be problematic with field-coded streams and
+	 * splitting fields
+	 */
+	if (h264_seq_hdr_info->sps_info.separate_colour_plane_flag)
+		h264_slice_hdr_info->colour_plane_id = swsr_read_bits(swsr_context, 2);
+
+	else
+		h264_slice_hdr_info->colour_plane_id = 0;
+
+	h264_slice_hdr_info->frame_num = swsr_read_bits
+					(swsr_context,
+					 h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4
+					 + 4);
+
+	VDEC_ASSERT(h264_slice_hdr_info->frame_num <
+		(1UL << (h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4 + 4)));
+
+	if (!h264_seq_hdr_info->sps_info.frame_mbs_only_flag) {
+		if (h264_slice_hdr_info->slice_type == B_SLICE &&
+		    !h264_seq_hdr_info->sps_info.direct_8x8_inference_flag)
+			slice_parse_error |= BSPP_ERROR_INVALID_VALUE;
+
+		h264_slice_hdr_info->field_pic_flag = swsr_read_bits(swsr_context, 1);
+		if (h264_slice_hdr_info->field_pic_flag)
+			h264_slice_hdr_info->bottom_field_flag = swsr_read_bits(swsr_context, 1);
+		else
+			h264_slice_hdr_info->bottom_field_flag = 0;
+	} else {
+		h264_slice_hdr_info->field_pic_flag = 0;
+		h264_slice_hdr_info->bottom_field_flag = 0;
+	}
+
+	/*
+	 * At this point we have everything we need, but we still lack all the
+	 * conditions for detecting new pictures (needed for error cases)
+	 */
+	if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+		h264_slice_hdr_info->idr_pic_id = swsr_read_unsigned_expgoulomb(swsr_context);
+
+	if (h264_seq_hdr_info->sps_info.pic_order_cnt_type == 0) {
+		h264_slice_hdr_info->pic_order_cnt_lsb = swsr_read_bits
+				(swsr_context,
+				 h264_seq_hdr_info->sps_info.log2_max_pic_order_cnt_lsb_minus4 + 4);
+		if (h264_pps_info->pic_order_present_flag && !h264_slice_hdr_info->field_pic_flag)
+			h264_slice_hdr_info->delta_pic_order_cnt_bottom =
+							swsr_read_signed_expgoulomb(swsr_context);
+	}
+
+	if (h264_seq_hdr_info->sps_info.pic_order_cnt_type == 1 &&
+	    !h264_seq_hdr_info->sps_info.delta_pic_order_always_zero_flag) {
+		h264_slice_hdr_info->delta_pic_order_cnt[0] = swsr_read_signed_expgoulomb
+									(swsr_context);
+		if (h264_pps_info->pic_order_present_flag && !h264_slice_hdr_info->field_pic_flag)
+			h264_slice_hdr_info->delta_pic_order_cnt[1] = swsr_read_signed_expgoulomb
+										(swsr_context);
+	}
+
+	if (h264_pps_info->redundant_pic_cnt_present_flag)
+		h264_slice_hdr_info->redundant_pic_cnt =
+			swsr_read_unsigned_expgoulomb(swsr_context);
+
+	/* For FMO streams, we need to go further */
+	if (h264_pps_info->num_slice_groups_minus1 != 0 &&
+	    h264_pps_info->slice_group_map_type >= 3 &&
+	    h264_pps_info->slice_group_map_type <= 5) {
+		if (h264_slice_hdr_info->slice_type == B_SLICE)
+			swsr_read_bits(swsr_context, 1);
+
+		if (h264_slice_hdr_info->slice_type == P_SLICE ||
+		    h264_slice_hdr_info->slice_type == SP_SLICE ||
+		    h264_slice_hdr_info->slice_type == B_SLICE) {
+			h264_slice_hdr_info->num_ref_idx_active_override_flag =
+				swsr_read_bits(swsr_context, 1);
+			if (h264_slice_hdr_info->num_ref_idx_active_override_flag) {
+				h264_slice_hdr_info->num_ref_idx_lx_active_minus1[0] =
+					swsr_read_unsigned_expgoulomb(swsr_context);
+				if (h264_slice_hdr_info->slice_type == B_SLICE)
+					h264_slice_hdr_info->num_ref_idx_lx_active_minus1[1] =
+						swsr_read_unsigned_expgoulomb(swsr_context);
+			}
+		}
+
+		if (h264_slice_hdr_info->slice_type != SI_SLICE &&
+		    h264_slice_hdr_info->slice_type != I_SLICE) {
+			/* Reference picture list modification */
+			/* parse reordering info and pack into commands */
+			unsigned int i;
+			unsigned int cmd_num, list_num;
+			unsigned int command;
+
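+			/*
+			 * Each command is a modification_of_pic_nums_idc
+			 * ue(v); any value other than the terminator 3
+			 * carries one ue(v) argument, which is read and
+			 * discarded since BSPP only tracks the bitstream
+			 * position here.
+			 */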
+			i = (h264_slice_hdr_info->slice_type == B_SLICE) ? 2 : 1;
+
+			for (list_num = 0; list_num < i; list_num++) {
+				cmd_num = 0;
+				if (swsr_read_bits(swsr_context, 1)) {
+					do {
+						command =
+							swsr_read_unsigned_expgoulomb(swsr_context);
+						if (command != 3) {
+							swsr_read_unsigned_expgoulomb(swsr_context);
+							cmd_num++;
+						}
+					} while (command != 3 && cmd_num <= SL_MAX_REF_IDX);
+				}
+			}
+		}
+
+		if ((h264_pps_info->weighted_pred_flag &&
+		     h264_slice_hdr_info->slice_type == P_SLICE) ||
+		    (h264_pps_info->weighted_bipred_idc &&
+		     h264_slice_hdr_info->slice_type == B_SLICE)) {
+			int mono_chrome;
+			unsigned int list, i, j, k;
+
+			mono_chrome = (!h264_seq_hdr_info->sps_info.chroma_format_idc) ? 1 : 0;
+
+			swsr_read_unsigned_expgoulomb(swsr_context);
+			if (!mono_chrome)
+				swsr_read_unsigned_expgoulomb(swsr_context);
+
+			k = (h264_slice_hdr_info->slice_type == B_SLICE) ? 2 : 1;
+
+			for (list = 0; list < k; list++) {
+				for (i = 0;
+				     i <= h264_slice_hdr_info->num_ref_idx_lx_active_minus1[list];
+				     i++) {
+					if (swsr_read_bits(swsr_context, 1)) {
+						swsr_read_signed_expgoulomb(swsr_context);
+						swsr_read_signed_expgoulomb(swsr_context);
+					}
+
+					if (!mono_chrome && swsr_read_bits(swsr_context, 1)) {
+						for (j = 0; j < 2; j++) {
+							swsr_read_signed_expgoulomb(swsr_context);
+							swsr_read_signed_expgoulomb(swsr_context);
+						}
+					}
+				}
+			}
+		}
+
+		if (nal_ref_idc != 0) {
+			unsigned int memmanop;
+
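+			/*
+			 * dec_ref_pic_marking(): each iteration reads one
+			 * memory_management_control_operation (0-6); op 3
+			 * carries two ue(v) arguments, ops 1, 2, 4 and 6 one,
+			 * and op 0 terminates the list.
+			 */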
+			if (nal_unit_type == H264_NALTYPE_IDR_SLICE) {
+				swsr_read_bits(swsr_context, 1);
+				swsr_read_bits(swsr_context, 1);
+			}
+			if (swsr_read_bits(swsr_context, 1)) {
+				do {
+					/* memory_management_control_operation: 0-6 */
+					memmanop = swsr_read_unsigned_expgoulomb(swsr_context);
+					if (memmanop != 0 && memmanop != 5) {
+						if (memmanop == 3) {
+							swsr_read_unsigned_expgoulomb(swsr_context);
+							swsr_read_unsigned_expgoulomb(swsr_context);
+						} else {
+							swsr_read_unsigned_expgoulomb(swsr_context);
+						}
+					}
+				} while (memmanop != 0);
+			}
+		}
+
+		if (h264_pps_info->entropy_coding_mode_flag &&
+		    h264_slice_hdr_info->slice_type != I_SLICE)
+			swsr_read_unsigned_expgoulomb(swsr_context);
+
+		swsr_read_signed_expgoulomb(swsr_context);
+
+		if (h264_slice_hdr_info->slice_type == SP_SLICE ||
+		    h264_slice_hdr_info->slice_type == SI_SLICE) {
+			if (h264_slice_hdr_info->slice_type == SP_SLICE)
+				swsr_read_bits(swsr_context, 1);
+
+			/* slice_qs_delta */
+			swsr_read_signed_expgoulomb(swsr_context);
+		}
+
+		if (h264_pps_info->deblocking_filter_control_present_flag) {
+			if (swsr_read_unsigned_expgoulomb(swsr_context) != 1) {
+				swsr_read_signed_expgoulomb(swsr_context);
+				swsr_read_signed_expgoulomb(swsr_context);
+			}
+		}
+
+		if (h264_pps_info->slice_group_map_type >= 3 &&
+		    h264_pps_info->slice_group_map_type <= 5) {
+			unsigned int num_slice_group_map_units =
+				(h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1) *
+				(h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1);
+
+			unsigned short slice_group_change_rate =
+				(h264_pps_info->slice_group_change_rate_minus1 + 1);
+
+			unsigned int width = h264ceillog2(num_slice_group_map_units /
+					slice_group_change_rate +
+					(num_slice_group_map_units % slice_group_change_rate ==
+					 0 ? 0 : 1) + 1);                          /* (7-32) */
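+			/*
+			 * Worked example for (7-32): 8160 map units (a
+			 * 1920x1088 frame) with slice_group_change_rate = 100
+			 * gives ceil(8160 / 100) = 82, so the cycle is read
+			 * in ceil(log2(82 + 1)) = 7 bits.
+			 */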
+			h264_slice_hdr_info->slice_group_change_cycle = swsr_read_bits(swsr_context,
+										       width);
+		}
+	}
+
+error:
+	return slice_parse_error;
+}
+
+static void bspp_h264_select_scaling_list(struct h264fw_picture_ps *h264fw_pps_info,
+					  struct bspp_h264_pps_info *h264_pps_info,
+					  struct bspp_h264_seq_hdr_info *h264_seq_hdr_info)
+{
+	unsigned int num8x8_lists;
+	unsigned int i;
+	const unsigned char *quant_matrix = NULL;
+	unsigned char (*scllst4x4pic)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+	(unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])h264_pps_info->scllst4x4pic;
+	unsigned char (*scllst8x8pic)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+	(unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])h264_pps_info->scllst8x8pic;
+
+	unsigned char (*scllst4x4seq)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE] =
+		(unsigned char (*)[H264FW_NUM_4X4_LISTS][H264FW_4X4_SIZE])
+		h264_seq_hdr_info->sps_info.scllst4x4seq;
+	unsigned char (*scllst8x8seq)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE] =
+		(unsigned char (*)[H264FW_NUM_8X8_LISTS][H264FW_8X8_SIZE])
+		h264_seq_hdr_info->sps_info.scllst8x8seq;
+
+	if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+		VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst4x4seq);
+		VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst8x8seq);
+	}
+
+	if (h264_pps_info->pic_scaling_matrix_present_flag) {
+		for (i = 0; i < H264FW_NUM_4X4_LISTS; i++) {
+			if (h264_pps_info->pic_scaling_list_present_flag[i]) {
+				if (h264_pps_info->usedefaultscalingmatrixflag_pic[i])
+					quant_matrix =
+						(i > 2) ? default_4x4_inter : default_4x4_intra;
+				else
+					quant_matrix = (*scllst4x4pic)[i];
+
+			} else {
+				if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+					/* SPS matrix present - use fallback rule B */
+					/* first 4x4 Intra list */
+					if (i == 0) {
+						if (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i] &&
+						    !h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq[i]) {
+							VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst4x4seq);
+							if (scllst4x4seq)
+								quant_matrix = (*scllst4x4seq)[i];
+						} else {
+							quant_matrix = default_4x4_intra;
+						}
+					/* first 4x4 Inter list */
+					} else if (i == 3) {
+						if (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i] &&
+						    !h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq[i]) {
+							VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst4x4seq);
+							if (scllst4x4seq)
+								quant_matrix = (*scllst4x4seq)[i];
+						} else {
+							quant_matrix = default_4x4_inter;
+						}
+					} else {
+						quant_matrix =
+							h264fw_pps_info->scalinglist4x4[i - 1];
+					}
+				} else {
+					/* SPS matrix not present - use fallback rule A */
+					/* first 4x4 Intra list */
+					if (i == 0)
+						quant_matrix = default_4x4_intra;
+					/* first 4x4 Inter list */
+					else if (i == 3)
+						quant_matrix = default_4x4_inter;
+					else
+						quant_matrix =
+							h264fw_pps_info->scalinglist4x4[i - 1];
+				}
+			}
+			if (!quant_matrix) {
+				VDEC_ASSERT(0);
+				return;
+			}
+			/* copy correct 4x4 list to output - as selected by PPS */
+			memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+			       sizeof(h264fw_pps_info->scalinglist4x4[i]));
+		}
+	} else {
+		/* PPS matrix not present, use SPS information */
+		if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+			for (i = 0; i < H264FW_NUM_4X4_LISTS; i++) {
+				if (h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag[i]) {
+					if (h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq[i]) {
+						quant_matrix = (i > 2) ? default_4x4_inter
+							: default_4x4_intra;
+					} else {
+						VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst4x4seq);
+						if (scllst4x4seq)
+							quant_matrix = (*scllst4x4seq)[i];
+					}
+				} else {
+					/* SPS list not present - use fallback rule A */
+					/* first 4x4 Intra list */
+					if (i == 0)
+						quant_matrix = default_4x4_intra;
+					else if (i == 3) /* first 4x4 Inter list */
+						quant_matrix = default_4x4_inter;
+					else
+						quant_matrix =
+							h264fw_pps_info->scalinglist4x4[i - 1];
+				}
+				if (quant_matrix) {
+					/* copy correct 4x4 list to output - as selected by SPS */
+					memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+					       sizeof(h264fw_pps_info->scalinglist4x4[i]));
+				}
+			}
+		} else {
+			/* SPS matrix not present - use flat lists */
+			quant_matrix = default_4x4_org;
+			for (i = 0; i < H264FW_NUM_4X4_LISTS; i++)
+				memcpy(h264fw_pps_info->scalinglist4x4[i], quant_matrix,
+				       sizeof(h264fw_pps_info->scalinglist4x4[i]));
+		}
+	}
+
+	/* 8x8 matrices */
+	num8x8_lists = (h264_seq_hdr_info->sps_info.chroma_format_idc == 3) ? 6 : 2;
+	if (h264_pps_info->transform_8x8_mode_flag) {
+		unsigned char *seq_scllstflg =
+			h264_seq_hdr_info->sps_info.seq_scaling_list_present_flag;
+		unsigned char *def_sclmatflg_seq =
+			h264_seq_hdr_info->sps_info.usedefaultscalingmatrixflag_seq;
+
+		if (h264_pps_info->pic_scaling_matrix_present_flag) {
+			for (i = 0; i < num8x8_lists; i++) {
+				if (h264_pps_info->pic_scaling_list_present_flag[i + H264FW_NUM_4X4_LISTS]) {
+					if (h264_pps_info->usedefaultscalingmatrixflag_pic[i + H264FW_NUM_4X4_LISTS]) {
+						quant_matrix = (i & 0x1) ? default_8x8_inter
+							: default_8x8_intra;
+					} else {
+						VDEC_ASSERT(h264_pps_info->scllst8x8pic);
+						if (scllst8x8pic)
+							quant_matrix = (*scllst8x8pic)[i];
+					}
+				} else {
+					if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+						/* SPS matrix present - use fallback rule B */
+						/* list 6 - first 8x8 Intra list */
+						if (i == 0) {
+							if (seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+							    !def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS]) {
+								VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst8x8seq);
+								if (scllst8x8seq)
+									quant_matrix = (*scllst8x8seq)[i];
+							} else {
+								quant_matrix = default_8x8_intra;
+							}
+						/* list 7 - first 8x8 Inter list */
+						} else if (i == 1) {
+							if (seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+							    !def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS]) {
+								VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst8x8seq);
+								if (scllst8x8seq)
+									quant_matrix = (*scllst8x8seq)[i];
+							} else {
+								quant_matrix = default_8x8_inter;
+							}
+						} else {
+							quant_matrix =
+								h264fw_pps_info->scalinglist8x8[i - 2];
+						}
+					} else {
+						/* SPS matrix not present - use fallback rule A */
+						/* list 6 - first 8x8 Intra list */
+						if (i == 0)
+							quant_matrix = default_8x8_intra;
+						/* list 7 - first 8x8 Inter list */
+						else if (i == 1)
+							quant_matrix = default_8x8_inter;
+						else
+							quant_matrix =
+								h264fw_pps_info->scalinglist8x8[i - 2];
+					}
+				}
+				if (quant_matrix) {
+					/* copy correct 8x8 list to output - as selected by PPS */
+					memcpy(h264fw_pps_info->scalinglist8x8[i], quant_matrix,
+					       sizeof(h264fw_pps_info->scalinglist8x8[i]));
+				}
+			}
+		} else {
+			/* PPS matrix not present, use SPS information */
+			if (h264_seq_hdr_info->sps_info.seq_scaling_matrix_present_flag) {
+				for (i = 0; i < num8x8_lists; i++) {
+					if (seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+					    def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS]) {
+						quant_matrix =
+							(i & 0x1) ? default_8x8_inter :
+							default_8x8_intra;
+					} else if (seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+						   !def_sclmatflg_seq[i + H264FW_NUM_4X4_LISTS]) {
+						VDEC_ASSERT(h264_seq_hdr_info->sps_info.scllst8x8seq);
+						if (scllst8x8seq)
+							quant_matrix = (*scllst8x8seq)[i];
+					} else if (!seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+						   i == 0) {
+						/* SPS list not present - use fallback rule A */
+						/* list 6 - first 8x8 Intra list */
+						quant_matrix = default_8x8_intra;
+					} else if (!seq_scllstflg[i + H264FW_NUM_4X4_LISTS] &&
+						   i == 1) {
+						/* list 7 - first 8x8 Inter list */
+						quant_matrix = default_8x8_inter;
+					} else {
+						quant_matrix =
+							h264fw_pps_info->scalinglist8x8[i - 2];
+					}
+					if (quant_matrix) {
+						/* copy correct 8x8 list to output -
+						 * as selected by SPS
+						 */
+						memcpy(h264fw_pps_info->scalinglist8x8[i],
+						       quant_matrix,
+						       sizeof(h264fw_pps_info->scalinglist8x8[i]));
+					}
+				}
+			} else {
+				/* SPS matrix not present - use flat lists */
+				quant_matrix = default_8x8_org;
+				for (i = 0; i < num8x8_lists; i++)
+					memcpy(h264fw_pps_info->scalinglist8x8[i], quant_matrix,
+					       sizeof(h264fw_pps_info->scalinglist8x8[i]));
+			}
+		}
+	}
+}
+
+static void bspp_h264_fwpps_populate(struct bspp_h264_pps_info *h264_pps_info,
+				     struct h264fw_picture_ps *h264fw_pps_info)
+{
+	h264fw_pps_info->deblocking_filter_control_present_flag =
+					h264_pps_info->deblocking_filter_control_present_flag;
+	h264fw_pps_info->transform_8x8_mode_flag = h264_pps_info->transform_8x8_mode_flag;
+	h264fw_pps_info->entropy_coding_mode_flag = h264_pps_info->entropy_coding_mode_flag;
+	h264fw_pps_info->redundant_pic_cnt_present_flag =
+						h264_pps_info->redundant_pic_cnt_present_flag;
+	h264fw_pps_info->weighted_bipred_idc = h264_pps_info->weighted_bipred_idc;
+	h264fw_pps_info->weighted_pred_flag = h264_pps_info->weighted_pred_flag;
+	h264fw_pps_info->pic_order_present_flag = h264_pps_info->pic_order_present_flag;
+	h264fw_pps_info->pic_init_qp = h264_pps_info->pic_init_qp_minus26 + 26;
+	h264fw_pps_info->constrained_intra_pred_flag = h264_pps_info->constrained_intra_pred_flag;
+	VDEC_ASSERT(sizeof(h264fw_pps_info->num_ref_lx_active_minus1) ==
+		    sizeof(h264_pps_info->num_ref_idx_lx_active_minus1));
+	VDEC_ASSERT(sizeof(h264fw_pps_info->num_ref_lx_active_minus1) ==
+		    sizeof(unsigned char) * H264FW_MAX_REFPIC_LISTS);
+	memcpy(h264fw_pps_info->num_ref_lx_active_minus1,
+	       h264_pps_info->num_ref_idx_lx_active_minus1,
+	       sizeof(h264fw_pps_info->num_ref_lx_active_minus1));
+	h264fw_pps_info->slice_group_map_type = h264_pps_info->slice_group_map_type;
+	h264fw_pps_info->num_slice_groups_minus1 = h264_pps_info->num_slice_groups_minus1;
+	h264fw_pps_info->slice_group_change_rate_minus1 =
+					h264_pps_info->slice_group_change_rate_minus1;
+	h264fw_pps_info->chroma_qp_index_offset = h264_pps_info->chroma_qp_index_offset;
+	h264fw_pps_info->second_chroma_qp_index_offset =
+						h264_pps_info->second_chroma_qp_index_offset;
+}
+
+static void bspp_h264_fwseq_hdr_populate(struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+					 struct h264fw_sequence_ps *h264_fwseq_hdr_info)
+{
+	/* Basic SPS */
+	h264_fwseq_hdr_info->profile_idc = h264_seq_hdr_info->sps_info.profile_idc;
+	h264_fwseq_hdr_info->chroma_format_idc = h264_seq_hdr_info->sps_info.chroma_format_idc;
+	h264_fwseq_hdr_info->separate_colour_plane_flag =
+		h264_seq_hdr_info->sps_info.separate_colour_plane_flag;
+	h264_fwseq_hdr_info->bit_depth_luma_minus8 =
+		h264_seq_hdr_info->sps_info.bit_depth_luma_minus8;
+	h264_fwseq_hdr_info->bit_depth_chroma_minus8 =
+		h264_seq_hdr_info->sps_info.bit_depth_chroma_minus8;
+	h264_fwseq_hdr_info->delta_pic_order_always_zero_flag =
+		h264_seq_hdr_info->sps_info.delta_pic_order_always_zero_flag;
+	h264_fwseq_hdr_info->log2_max_pic_order_cnt_lsb =
+		h264_seq_hdr_info->sps_info.log2_max_pic_order_cnt_lsb_minus4 + 4;
+	h264_fwseq_hdr_info->max_num_ref_frames = h264_seq_hdr_info->sps_info.max_num_ref_frames;
+	h264_fwseq_hdr_info->log2_max_frame_num =
+		h264_seq_hdr_info->sps_info.log2_max_frame_num_minus4 + 4;
+	h264_fwseq_hdr_info->pic_order_cnt_type = h264_seq_hdr_info->sps_info.pic_order_cnt_type;
+	h264_fwseq_hdr_info->frame_mbs_only_flag = h264_seq_hdr_info->sps_info.frame_mbs_only_flag;
+	h264_fwseq_hdr_info->gaps_in_frame_num_value_allowed_flag =
+		h264_seq_hdr_info->sps_info.gaps_in_frame_num_value_allowed_flag;
+	h264_fwseq_hdr_info->constraint_set_flags =
+		h264_seq_hdr_info->sps_info.constraint_set_flags;
+	h264_fwseq_hdr_info->level_idc = h264_seq_hdr_info->sps_info.level_idc;
+	h264_fwseq_hdr_info->num_ref_frames_in_pic_order_cnt_cycle =
+		h264_seq_hdr_info->sps_info.num_ref_frames_in_pic_order_cnt_cycle;
+	h264_fwseq_hdr_info->mb_adaptive_frame_field_flag =
+		h264_seq_hdr_info->sps_info.mb_adaptive_frame_field_flag;
+	h264_fwseq_hdr_info->offset_for_non_ref_pic =
+		h264_seq_hdr_info->sps_info.offset_for_non_ref_pic;
+	h264_fwseq_hdr_info->offset_for_top_to_bottom_field =
+		h264_seq_hdr_info->sps_info.offset_for_top_to_bottom_field;
+	h264_fwseq_hdr_info->pic_width_in_mbs_minus1 =
+		h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1;
+	h264_fwseq_hdr_info->pic_height_in_map_units_minus1 =
+		h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1;
+	h264_fwseq_hdr_info->direct_8x8_inference_flag =
+		h264_seq_hdr_info->sps_info.direct_8x8_inference_flag;
+	h264_fwseq_hdr_info->qpprime_y_zero_transform_bypass_flag =
+		h264_seq_hdr_info->sps_info.qpprime_y_zero_transform_bypass_flag;
+
+	if (h264_seq_hdr_info->sps_info.offset_for_ref_frame)
+		memcpy(h264_fwseq_hdr_info->offset_for_ref_frame,
+		       h264_seq_hdr_info->sps_info.offset_for_ref_frame,
+		       sizeof(h264_fwseq_hdr_info->offset_for_ref_frame));
+	else
+		memset(h264_fwseq_hdr_info->offset_for_ref_frame, 0x00,
+		       sizeof(h264_fwseq_hdr_info->offset_for_ref_frame));
+
+	memset(h264_fwseq_hdr_info->anchor_inter_view_reference_id_list, 0x00,
+	       sizeof(h264_fwseq_hdr_info->anchor_inter_view_reference_id_list));
+	memset(h264_fwseq_hdr_info->non_anchor_inter_view_reference_id_list, 0x00,
+	       sizeof(h264_fwseq_hdr_info->non_anchor_inter_view_reference_id_list));
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+	/* From VUI */
+	h264_fwseq_hdr_info->max_dec_frame_buffering =
+		h264_seq_hdr_info->vui_info.max_dec_frame_buffering;
+	h264_fwseq_hdr_info->num_reorder_frames = h264_seq_hdr_info->vui_info.num_reorder_frames;
+#else
+	/* From VUI */
+	if (h264_seq_hdr_info->vui_info.bitstream_restriction_flag) {
+		VDEC_ASSERT(h264_seq_hdr_info->sps_info.vui_parameters_present_flag);
+		h264_fwseq_hdr_info->max_dec_frame_buffering =
+			h264_seq_hdr_info->vui_info.max_dec_frame_buffering;
+		h264_fwseq_hdr_info->num_reorder_frames =
+			h264_seq_hdr_info->vui_info.num_reorder_frames;
+	} else {
+		h264_fwseq_hdr_info->max_dec_frame_buffering = 1;
+		h264_fwseq_hdr_info->num_reorder_frames = 16;
+	}
+#endif
+}
+
+static void bspp_h264_commonseq_hdr_populate(struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+					     struct vdec_comsequ_hdrinfo *comseq_hdr_info)
+{
+	struct bspp_h264_sps_info *sps_info = &h264_seq_hdr_info->sps_info;
+	struct bspp_h264_vui_info *vui_info = &h264_seq_hdr_info->vui_info;
+
+	comseq_hdr_info->codec_profile = sps_info->profile_idc;
+	comseq_hdr_info->codec_level = sps_info->level_idc;
+
+	if (sps_info->vui_parameters_present_flag && vui_info->timing_info_present_flag) {
+		comseq_hdr_info->frame_rate_num = vui_info->time_scale;
+		comseq_hdr_info->frame_rate_den = 2 * vui_info->num_units_in_tick;
+		comseq_hdr_info->frame_rate = ((long)comseq_hdr_info->frame_rate_num) /
+			((long)comseq_hdr_info->frame_rate_den);
+	}
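+	/*
+	 * Example: time_scale = 50 with num_units_in_tick = 1 yields
+	 * 50 / 2 = 25 frames per second (the VUI clock ticks once per
+	 * field, i.e. twice per frame).
+	 */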
+
+	/*
+	 * If a colour space description was present in the VUI parameters,
+	 * copy it into the common sequence header info for use by the
+	 * application.
+	 */
+	if (vui_info->video_signal_type_present_flag && vui_info->colour_description_present_flag) {
+		comseq_hdr_info->color_space_info.is_present = TRUE;
+		comseq_hdr_info->color_space_info.color_primaries = vui_info->colour_primaries;
+		comseq_hdr_info->color_space_info.transfer_characteristics =
+			vui_info->transfer_characteristics;
+		comseq_hdr_info->color_space_info.matrix_coefficients =
+			vui_info->matrix_coefficients;
+	}
+
+	if (vui_info->aspect_ratio_info_present_flag) {
+		comseq_hdr_info->aspect_ratio_num = vui_info->sar_width;
+		comseq_hdr_info->aspect_ratio_den = vui_info->sar_height;
+	}
+
+	comseq_hdr_info->interlaced_frames = sps_info->frame_mbs_only_flag ? 0 : 1;
+
+	/* pixel_info populate */
+	VDEC_ASSERT(sps_info->chroma_format_idc < 4);
+	comseq_hdr_info->pixel_info.chroma_fmt = (sps_info->chroma_format_idc == 0) ? 0 : 1;
+	comseq_hdr_info->pixel_info.chroma_fmt_idc = pixel_format_idc[sps_info->chroma_format_idc];
+	comseq_hdr_info->pixel_info.chroma_interleave =
+		((sps_info->chroma_format_idc == 0) ||
+		(sps_info->chroma_format_idc == 3 && sps_info->separate_colour_plane_flag)) ?
+		PIXEL_INVALID_CI : PIXEL_UV_ORDER;
+	comseq_hdr_info->pixel_info.num_planes =
+		(sps_info->chroma_format_idc == 0) ? 1 :
+		(sps_info->chroma_format_idc == 3 && sps_info->separate_colour_plane_flag) ? 3 : 2;
+	comseq_hdr_info->pixel_info.bitdepth_y = sps_info->bit_depth_luma_minus8 + 8;
+	comseq_hdr_info->pixel_info.bitdepth_c = sps_info->bit_depth_chroma_minus8 + 8;
+	comseq_hdr_info->pixel_info.mem_pkg =
+		(comseq_hdr_info->pixel_info.bitdepth_y > 8 ||
+		comseq_hdr_info->pixel_info.bitdepth_c > 8) ?
+		PIXEL_BIT10_MSB_MP : PIXEL_BIT8_MP;
+	comseq_hdr_info->pixel_info.pixfmt =
+		pixel_get_pixfmt(comseq_hdr_info->pixel_info.chroma_fmt_idc,
+				 comseq_hdr_info->pixel_info.chroma_interleave,
+				 comseq_hdr_info->pixel_info.mem_pkg,
+				 comseq_hdr_info->pixel_info.bitdepth_y,
+				 comseq_hdr_info->pixel_info.bitdepth_c,
+				 comseq_hdr_info->pixel_info.num_planes);
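+	/*
+	 * Example: an 8-bit 4:2:0 stream resolves to two planes with
+	 * PIXEL_UV_ORDER chroma interleave and PIXEL_BIT8_MP packing,
+	 * i.e. an NV12-style buffer layout.
+	 */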
+
+	/* max_frame_size populate */
+	comseq_hdr_info->max_frame_size.width = (sps_info->pic_width_in_mbs_minus1 + 1) * 16;
+	/*
+	 * H264 coded sizes are always MB aligned. For sequences which *may*
+	 * contain field-coded pictures, as signalled by frame_mbs_only_flag,
+	 * pic_height_in_map_units_minus1 refers to the field height in MBs,
+	 * so to find the actual frame height we need Field_MBs_InHeight * 32
+	 */
+	comseq_hdr_info->max_frame_size.height = (sps_info->pic_height_in_map_units_minus1 + 1) *
+						(sps_info->frame_mbs_only_flag ? 1 : 2) * 16;
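+	/*
+	 * Example: a 1080i stream with pic_width_in_mbs_minus1 = 119,
+	 * pic_height_in_map_units_minus1 = 33 and frame_mbs_only_flag = 0
+	 * gives 120 * 16 = 1920 by 34 * 2 * 16 = 1088.
+	 */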
+
+	/* Passing 2*N to vxd_dec so that get_nbuffers can use formula N+3 for all codecs */
+	comseq_hdr_info->max_ref_frame_num = 2 * sps_info->max_num_ref_frames;
+
+	comseq_hdr_info->field_codec_mblocks = sps_info->mb_adaptive_frame_field_flag;
+	comseq_hdr_info->min_pict_buf_num = vui_info->max_dec_frame_buffering;
+
+	/* orig_display_region populate */
+	if (sps_info->frame_cropping_flag) {
+		int sub_width_c, sub_height_c, crop_unit_x, crop_unit_y;
+		int frame_crop_left, frame_crop_right, frame_crop_top, frame_crop_bottom;
+
+		sub_width_c = bspp_h264_get_subwidthc(sps_info->chroma_format_idc,
+						      sps_info->separate_colour_plane_flag);
+
+		sub_height_c = bspp_h264_get_subheightc(sps_info->chroma_format_idc,
+							sps_info->separate_colour_plane_flag);
+
+		/* equation source: ITU-T H.264 2010/03, page 77 */
+		/* ChromaArrayType == 0 */
+		if (sps_info->separate_colour_plane_flag || sps_info->chroma_format_idc == 0) {
+			/* (7-18) */
+			crop_unit_x = 1;
+			/* (7-19) */
+			crop_unit_y = 2 - sps_info->frame_mbs_only_flag;
+			/* ChromaArrayType == chroma_format_idc */
+		} else {
+			/* (7-20) */
+			crop_unit_x = sub_width_c;
+			/* (7-21) */
+			crop_unit_y = sub_height_c * (2 - sps_info->frame_mbs_only_flag);
+		}
+
+		VDEC_ASSERT(sps_info->frame_crop_left_offset <=
+			(comseq_hdr_info->max_frame_size.width / crop_unit_x) -
+			(sps_info->frame_crop_right_offset + 1));
+
+		VDEC_ASSERT(sps_info->frame_crop_top_offset <=
+			(comseq_hdr_info->max_frame_size.height / crop_unit_y) -
+			(sps_info->frame_crop_bottom_offset + 1));
+		frame_crop_left = crop_unit_x * sps_info->frame_crop_left_offset;
+		frame_crop_right = comseq_hdr_info->max_frame_size.width -
+			(crop_unit_x * sps_info->frame_crop_right_offset);
+		frame_crop_top = crop_unit_y * sps_info->frame_crop_top_offset;
+		frame_crop_bottom = comseq_hdr_info->max_frame_size.height -
+			(crop_unit_y * sps_info->frame_crop_bottom_offset);
+		comseq_hdr_info->orig_display_region.left_offset = (unsigned int)frame_crop_left;
+		comseq_hdr_info->orig_display_region.top_offset = (unsigned int)frame_crop_top;
+		comseq_hdr_info->orig_display_region.width = (frame_crop_right - frame_crop_left);
+		comseq_hdr_info->orig_display_region.height = (frame_crop_bottom - frame_crop_top);
+	} else {
+		comseq_hdr_info->orig_display_region.left_offset = 0;
+		comseq_hdr_info->orig_display_region.top_offset = 0;
+		comseq_hdr_info->orig_display_region.width = comseq_hdr_info->max_frame_size.width;
+		comseq_hdr_info->orig_display_region.height =
+			comseq_hdr_info->max_frame_size.height;
+	}
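+	/*
+	 * Example: for the 1920x1088 4:2:0 progressive case,
+	 * SubWidthC = SubHeightC = 2, so crop_unit_x = 2 and
+	 * crop_unit_y = 2 * (2 - 1) = 2; frame_crop_bottom_offset = 4
+	 * then crops 2 * 4 = 8 luma rows, leaving a 1920x1080 display
+	 * region.
+	 */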
+
+#ifdef REDUCED_DPB_NO_PIC_REORDERING
+	comseq_hdr_info->max_reorder_picts = vui_info->max_dec_frame_buffering;
+#else
+	if (sps_info->vui_parameters_present_flag && vui_info->bitstream_restriction_flag)
+		comseq_hdr_info->max_reorder_picts = vui_info->max_dec_frame_buffering;
+	else
+		comseq_hdr_info->max_reorder_picts = 0;
+#endif
+	comseq_hdr_info->separate_chroma_planes =
+		h264_seq_hdr_info->sps_info.separate_colour_plane_flag ? 1 : 0;
+}
+
+static void bspp_h264_pict_hdr_populate(enum h264_nalunittype nal_unit_type,
+					struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+					struct vdec_comsequ_hdrinfo *comseq_hdr_info,
+					struct bspp_pict_hdr_info *pict_hdr_info)
+{
+	/*
+	 * H264 has a slice coding type, not a picture one. Contrary to the
+	 * rest of the standards, bReference is set explicitly from the NAL
+	 * unit externally (see just below the call to
+	 * bspp_h264_pict_hdr_populate), so pict_hdr_info->bReference is not
+	 * set here.
+	 */
+	pict_hdr_info->intra_coded = (nal_unit_type == H264_NALTYPE_IDR_SLICE) ? 1 : 0;
+	pict_hdr_info->field = h264_slice_hdr_info->field_pic_flag;
+
+	pict_hdr_info->post_processing = 0;
+	/* For H264 Maximum and Coded sizes are the same */
+	pict_hdr_info->coded_frame_size.width = comseq_hdr_info->max_frame_size.width;
+	/* For H264 Maximum and Coded sizes are the same */
+	pict_hdr_info->coded_frame_size.height = comseq_hdr_info->max_frame_size.height;
+	/*
+	 * For H264 Encoded Display size has been precomputed as part of the
+	 * common sequence info
+	 */
+	pict_hdr_info->disp_info.enc_disp_region = comseq_hdr_info->orig_display_region;
+	/*
+	 * For H264 there is no resampling, so encoded and actual display
+	 * regions are the same
+	 */
+	pict_hdr_info->disp_info.disp_region = comseq_hdr_info->orig_display_region;
+	/* H264 does not have that */
+	pict_hdr_info->disp_info.num_pan_scan_windows = 0;
+	memset(pict_hdr_info->disp_info.pan_scan_windows, 0,
+	       sizeof(pict_hdr_info->disp_info.pan_scan_windows));
+}
+
+static int bspp_h264_destroy_seq_hdr_info(const void *secure_sps_info)
+{
+	struct bspp_h264_seq_hdr_info *h264_seq_hdr_info = NULL;
+
+	if (!secure_sps_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	h264_seq_hdr_info = (struct bspp_h264_seq_hdr_info *)secure_sps_info;
+
+	/* Cleaning vui_info */
+	kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.bit_rate_value_minus1);
+	kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cpb_size_value_minus1);
+	kfree(h264_seq_hdr_info->vui_info.nal_hrd_parameters.cbr_flag);
+	kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.bit_rate_value_minus1);
+	kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cpb_size_value_minus1);
+	kfree(h264_seq_hdr_info->vui_info.vcl_hrd_parameters.cbr_flag);
+
+	/* Cleaning sps_info */
+	kfree(h264_seq_hdr_info->sps_info.offset_for_ref_frame);
+	kfree(h264_seq_hdr_info->sps_info.scllst4x4seq);
+	kfree(h264_seq_hdr_info->sps_info.scllst8x8seq);
+
+	return 0;
+}
+
+static int bspp_h264_destroy_pps_info(const void *secure_pps_info)
+{
+	struct bspp_h264_pps_info *h264_pps_info = NULL;
+
+	if (!secure_pps_info)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	h264_pps_info = (struct bspp_h264_pps_info *)secure_pps_info;
+	kfree(h264_pps_info->h264_ppssgm_info.slice_group_id);
+	h264_pps_info->h264_ppssgm_info.slicegroupidnum = 0;
+	kfree(h264_pps_info->scllst4x4pic);
+	kfree(h264_pps_info->scllst8x8pic);
+
+	return 0;
+}
+
+static int bspp_h264_destroy_data(enum bspp_unit_type data_type, void *data_handle)
+{
+	int result = 0;
+
+	if (!data_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	switch (data_type) {
+	case BSPP_UNIT_SEQUENCE:
+		result = bspp_h264_destroy_seq_hdr_info(data_handle);
+		break;
+	case BSPP_UNIT_PPS:
+		result = bspp_h264_destroy_pps_info(data_handle);
+		break;
+	default:
+		break;
+	}
+	return result;
+}
+
+static void bspp_h264_generate_slice_groupmap(struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+					      struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+					      struct bspp_h264_pps_info *h264_pps_info,
+					      unsigned char *map_unit_to_slice_groupmap,
+					      unsigned int map_size)
+{
+	int group;
+	unsigned int num_slice_group_mapunits;
+	unsigned int i = 0, j, k = 0;
+	unsigned char num_slice_groups = h264_pps_info->num_slice_groups_minus1 + 1;
+	unsigned int pic_width_in_mbs = h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1;
+	unsigned int pic_height_in_map_units =
+		h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1;
+
+	num_slice_group_mapunits = map_size;
+	if (h264_pps_info->slice_group_map_type == 6) {
+		if ((unsigned int)num_slice_groups != num_slice_group_mapunits) {
+			VDEC_ASSERT(!"wrong pps->num_slice_group_map_units_minus1 for used SPS and FMO type 6");
+			if (num_slice_group_mapunits >
+				h264_pps_info->h264_ppssgm_info.slicegroupidnum)
+				num_slice_group_mapunits =
+					h264_pps_info->h264_ppssgm_info.slicegroupidnum;
+		}
+	}
+
+	/* only one slice group, or an out-of-range group count: flat map */
+	if (h264_pps_info->num_slice_groups_minus1 == 0 ||
+	    h264_pps_info->num_slice_groups_minus1 >= MAX_SLICEGROUP_COUNT) {
+		memset(map_unit_to_slice_groupmap, 0, map_size * sizeof(unsigned char));
+		return;
+	}
+	if (h264_pps_info->slice_group_map_type == 0) {
+		do {
+			for (group = 0;
+			     group <= h264_pps_info->num_slice_groups_minus1 &&
+			     i < num_slice_group_mapunits;
+			     i += h264_pps_info->run_length_minus1[group++] + 1) {
+				for (j = 0;
+				     j <= h264_pps_info->run_length_minus1[group] &&
+				     i + j < num_slice_group_mapunits;
+				     j++)
+					map_unit_to_slice_groupmap[i + j] = group;
+			}
+		} while (i < num_slice_group_mapunits);
+	} else if (h264_pps_info->slice_group_map_type == 1) {
+		for (i = 0; i < num_slice_group_mapunits; i++) {
+			map_unit_to_slice_groupmap[i] = ((i % pic_width_in_mbs) +
+				(((i / pic_width_in_mbs) *
+				(h264_pps_info->num_slice_groups_minus1 + 1)) / 2)) %
+				(h264_pps_info->num_slice_groups_minus1 + 1);
+		}
+	} else if (h264_pps_info->slice_group_map_type == 2) {
+		unsigned int y_top_left, x_top_left, y_bottom_right, x_bottom_right, x, y;
+
+		for (i = 0; i < num_slice_group_mapunits; i++)
+			map_unit_to_slice_groupmap[i] = h264_pps_info->num_slice_groups_minus1;
+
+		for (group = h264_pps_info->num_slice_groups_minus1 - 1; group >= 0; group--) {
+			y_top_left = h264_pps_info->top_left[group] / pic_width_in_mbs;
+			x_top_left = h264_pps_info->top_left[group] % pic_width_in_mbs;
+			y_bottom_right = h264_pps_info->bottom_right[group] / pic_width_in_mbs;
+			x_bottom_right = h264_pps_info->bottom_right[group] % pic_width_in_mbs;
+			for (y = y_top_left; y <= y_bottom_right; y++)
+				for (x = x_top_left; x <= x_bottom_right; x++) {
+					if (h264_pps_info->top_left[group] >
+						h264_pps_info->bottom_right[group] ||
+						h264_pps_info->bottom_right[group] >=
+						num_slice_group_mapunits)
+						continue;
+					map_unit_to_slice_groupmap[y * pic_width_in_mbs + x] =
+						group;
+				}
+		}
+	} else if (h264_pps_info->slice_group_map_type == 3) {
+		int left_bound, top_bound, right_bound, bottom_bound;
+		int x, y, x_dir, y_dir;
+		int map_unit_vacant;
+
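+		/*
+		 * Type 3 is the box-out pattern (H.264 spec 8.2.2.4):
+		 * starting near the centre of the picture, spiral outwards
+		 * in the direction given by slice_group_change_direction_flag
+		 * until mapunits_in_slicegroup_0 units belong to group 0;
+		 * everything else stays in group 1.
+		 */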
+		unsigned int mapunits_in_slicegroup_0 =
+			umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+				h264_slice_hdr_info->slice_group_change_cycle),
+				(unsigned int)num_slice_group_mapunits);
+
+		for (i = 0; i < num_slice_group_mapunits; i++)
+			map_unit_to_slice_groupmap[i] = 2;
+
+		x = (pic_width_in_mbs - h264_pps_info->slice_group_change_direction_flag) / 2;
+		y = (pic_height_in_map_units - h264_pps_info->slice_group_change_direction_flag) /
+			2;
+
+		left_bound = x;
+		top_bound = y;
+		right_bound = x;
+		bottom_bound = y;
+
+		x_dir = h264_pps_info->slice_group_change_direction_flag - 1;
+		y_dir = h264_pps_info->slice_group_change_direction_flag;
+
+		for (k = 0; k < num_slice_group_mapunits; k += map_unit_vacant) {
+			map_unit_vacant =
+				(map_unit_to_slice_groupmap[y * pic_width_in_mbs + x] ==
+				2);
+			if (map_unit_vacant)
+				map_unit_to_slice_groupmap[y * pic_width_in_mbs + x] =
+					(k >= mapunits_in_slicegroup_0);
+
+			if (x_dir == -1 && x == left_bound) {
+				left_bound = smax(left_bound - 1, 0);
+				x = left_bound;
+				x_dir = 0;
+				y_dir = 2 * h264_pps_info->slice_group_change_direction_flag - 1;
+			} else if (x_dir == 1 && x == right_bound) {
+				right_bound = smin(right_bound + 1, (int)pic_width_in_mbs - 1);
+				x = right_bound;
+				x_dir = 0;
+				y_dir = 1 - 2 * h264_pps_info->slice_group_change_direction_flag;
+			} else if (y_dir == -1 && y == top_bound) {
+				top_bound = smax(top_bound - 1, 0);
+				y = top_bound;
+				x_dir = 1 - 2 * h264_pps_info->slice_group_change_direction_flag;
+				y_dir = 0;
+			} else if (y_dir == 1 && y == bottom_bound) {
+				bottom_bound = smin(bottom_bound + 1,
+						    (int)pic_height_in_map_units - 1);
+				y = bottom_bound;
+				x_dir = 2 * h264_pps_info->slice_group_change_direction_flag - 1;
+				y_dir = 0;
+			} else {
+				x = x + x_dir;
+				y = y + y_dir;
+			}
+		}
+	} else if (h264_pps_info->slice_group_map_type == 4) {
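+		/*
+		 * Type 4 is the raster-scan slice group (H.264 spec 8.2.2.5):
+		 * the first sizeof_upper_left_group map units in raster order
+		 * form one group and the remainder form the other.
+		 */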
+		unsigned int mapunits_in_slicegroup_0 =
+			umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+				h264_slice_hdr_info->slice_group_change_cycle),
+				(unsigned int)num_slice_group_mapunits);
+		unsigned int sizeof_upper_left_group =
+			h264_pps_info->slice_group_change_direction_flag ?
+			(num_slice_group_mapunits -
+			mapunits_in_slicegroup_0) : mapunits_in_slicegroup_0;
+		for (i = 0; i < num_slice_group_mapunits; i++) {
+			if (i < sizeof_upper_left_group)
+				map_unit_to_slice_groupmap[i] =
+					h264_pps_info->slice_group_change_direction_flag;
+			else
+				map_unit_to_slice_groupmap[i] = 1 -
+					h264_pps_info->slice_group_change_direction_flag;
+		}
+	} else if (h264_pps_info->slice_group_map_type == 5) {
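+		/*
+		 * Type 5 is the wipe slice group (H.264 spec 8.2.2.6): the
+		 * same split as type 4 but walking the map units column by
+		 * column rather than in raster order.
+		 */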
+		unsigned int mapunits_in_slicegroup_0 =
+			umin((unsigned int)((h264_pps_info->slice_group_change_rate_minus1 + 1) *
+				h264_slice_hdr_info->slice_group_change_cycle),
+				(unsigned int)num_slice_group_mapunits);
+		unsigned int sizeof_upper_left_group =
+			h264_pps_info->slice_group_change_direction_flag ?
+			(num_slice_group_mapunits -
+			mapunits_in_slicegroup_0) : mapunits_in_slicegroup_0;
+
+		for (j = 0; j < (unsigned int)pic_width_in_mbs; j++) {
+			for (i = 0; i < (unsigned int)pic_height_in_map_units; i++) {
+				if (k++ < sizeof_upper_left_group)
+					map_unit_to_slice_groupmap[i * pic_width_in_mbs + j] =
+						h264_pps_info->slice_group_change_direction_flag;
+				else
+					map_unit_to_slice_groupmap[i * pic_width_in_mbs + j] =
+						1 -
+						h264_pps_info->slice_group_change_direction_flag;
+			}
+		}
+	} else if (h264_pps_info->slice_group_map_type == 6) {
+		VDEC_ASSERT(num_slice_group_mapunits <=
+			    h264_pps_info->h264_ppssgm_info.slicegroupidnum);
+		for (i = 0; i < num_slice_group_mapunits; i++)
+			map_unit_to_slice_groupmap[i] =
+				h264_pps_info->h264_ppssgm_info.slice_group_id[i];
+	}
+}
+
+static int bspp_h264_parse_mvc_slice_extension(void *swsr_context,
+					       struct bspp_h264_inter_pict_ctx *inter_pict_ctx)
+{
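+	/*
+	 * nal_unit_header_mvc_extension (H.264 Annex H): svc_extension_flag
+	 * u(1); when it is 0 the MVC extension follows: non_idr_flag u(1) +
+	 * priority_id u(6) (7 bits), view_id u(10), then temporal_id u(3) +
+	 * anchor_pic_flag u(1) + inter_view_flag u(1) + reserved_one_bit
+	 * u(1) (6 bits).
+	 */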
+	if (!swsr_read_bits(swsr_context, 1)) {
+		swsr_read_bits(swsr_context, 7);
+		inter_pict_ctx->current_view_id = swsr_read_bits(swsr_context, 10);
+		swsr_read_bits(swsr_context, 6);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int bspp_h264_unitparser_compile_sgmdata
+			(struct bspp_h264_slice_hdr_info *h264_slice_hdr_info,
+			 struct bspp_h264_seq_hdr_info *h264_seq_hdr_info,
+			 struct bspp_h264_pps_info *h264_pps_info,
+			 struct bspp_pict_hdr_info *pict_hdr_info)
+{
+	memset(&pict_hdr_info->pict_sgm_data, 0, sizeof(pict_hdr_info->pict_sgm_data));
+
+	pict_hdr_info->pict_sgm_data.id = 1;
+
+	/* Allocate memory for SGM. */
+	pict_hdr_info->pict_sgm_data.size =
+		(h264_seq_hdr_info->sps_info.pic_height_in_map_units_minus1 + 1) *
+		(h264_seq_hdr_info->sps_info.pic_width_in_mbs_minus1 + 1);
+
+	pict_hdr_info->pict_sgm_data.pic_data = kmalloc((pict_hdr_info->pict_sgm_data.size),
+							GFP_KERNEL);
+	VDEC_ASSERT(pict_hdr_info->pict_sgm_data.pic_data);
+	if (!pict_hdr_info->pict_sgm_data.pic_data) {
+		pict_hdr_info->pict_sgm_data.id = BSPP_INVALID;
+		return IMG_ERROR_OUT_OF_MEMORY;
+	}
+
+	bspp_h264_generate_slice_groupmap(h264_slice_hdr_info, h264_seq_hdr_info, h264_pps_info,
+					  pict_hdr_info->pict_sgm_data.pic_data,
+					  pict_hdr_info->pict_sgm_data.size);
+
+	/*
+	 * Check the discontinuous_mbs flag in the current frame for FMO.
+	 * No FMO support.
+	 */
+	pict_hdr_info->discontinuous_mbs = 0;
+
+	return 0;
+}
+
+static int bspp_h264_unit_parser(void *swsr_context, struct bspp_unit_data *unit_data)
+{
+	int result = 0;
+	enum bspp_error_type parse_error = BSPP_ERROR_NONE;
+	enum h264_nalunittype nal_unit_type;
+	unsigned char nal_ref_idc;
+	struct bspp_h264_inter_pict_ctx *interpicctx;
+	struct bspp_sequence_hdr_info *out_seq_info;
+	unsigned char id;
+
+	interpicctx = &unit_data->parse_state->inter_pict_ctx->h264_ctx;
+	out_seq_info = unit_data->out.sequ_hdr_info;
+
+	/* At this point we should be EXACTLY at the NALTYPE byte */
+	/* parse the nal header type */
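+	/* Skip the forbidden_zero_bit. */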
+	swsr_read_bits(swsr_context, 1);
+	nal_ref_idc = swsr_read_bits(swsr_context, 2);
+	nal_unit_type = (enum h264_nalunittype)swsr_read_bits(swsr_context, 5);
+
+	switch (unit_data->unit_type) {
+	case BSPP_UNIT_SEQUENCE:
+		VDEC_ASSERT(nal_unit_type == H264_NALTYPE_SEQUENCE_PARAMETER_SET ||
+			    nal_unit_type == H264_NALTYPE_SUBSET_SPS);
+		{
+			unsigned char id_loc;
+			/* Parse SPS structure */
+			struct bspp_h264_seq_hdr_info *h264_seq_hdr_info =
+			(struct bspp_h264_seq_hdr_info *)(out_seq_info->secure_sequence_info);
+			/* FW SPS Data structure */
+			struct bspp_ddbuf_array_info *tmp = &out_seq_info->fw_sequence;
+			struct h264fw_sequence_ps *h264_fwseq_hdr_info =
+			(struct h264fw_sequence_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr
+				+ tmp->buf_offset);
+			/* Common Sequence Header Info */
+			struct vdec_comsequ_hdrinfo *comseq_hdr_info =
+				&out_seq_info->sequ_hdr_info.com_sequ_hdr_info;
+
+#ifdef DEBUG_DECODER_DRIVER
+			pr_info("Unit Parser: Found SEQUENCE_PARAMETER_SET NAL unit\n");
+#endif
+			VDEC_ASSERT(h264_seq_hdr_info);
+			VDEC_ASSERT(h264_fwseq_hdr_info);
+			if (!h264_seq_hdr_info)
+				return IMG_ERROR_ALREADY_COMPLETE;
+
+			if (!h264_fwseq_hdr_info)
+				return IMG_ERROR_ALREADY_COMPLETE;
+
+			/* Call SPS parser to populate the "Parse SPS Structure" */
+			unit_data->parse_error |=
+				bspp_h264_sps_parser(swsr_context, unit_data->str_res_handle,
+						     h264_seq_hdr_info);
+			/* From "Parse SPS Structure" populate the "FW SPS Data Structure" */
+			bspp_h264_fwseq_hdr_populate(h264_seq_hdr_info, h264_fwseq_hdr_info);
+			/*
+			 * From "Parse SPS Structure" populate the
+			 * "Common Sequence Header Info"
+			 */
+			bspp_h264_commonseq_hdr_populate(h264_seq_hdr_info, comseq_hdr_info);
+			/* Set the SPS ID */
+			/*
+			 * seq_parameter_set_id is always in the range 0-31, so we
+			 * can add an offset to mark subset sequence headers
+			 */
+			id_loc = h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+			out_seq_info->sequ_hdr_info.sequ_hdr_id =
+				(nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+				nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+				nal_unit_type == H264_NALTYPE_SUBSET_SPS) ? id_loc + 32 : id_loc;
+
+			/*
+			 * Set the first SPS ID as Active SPS ID for SEI parsing
+			 * to cover the case of not having SeiBufferingPeriod to
+			 * give us the SPS ID
+			 */
+			if (interpicctx->active_sps_for_sei_parsing == BSPP_INVALID)
+				interpicctx->active_sps_for_sei_parsing =
+					h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+		}
+		break;
+
+	case BSPP_UNIT_PPS:
+		VDEC_ASSERT(nal_unit_type == H264_NALTYPE_PICTURE_PARAMETER_SET);
+		{
+			/* Parse PPS structure */
+			struct bspp_h264_pps_info *h264_pps_info =
+			(struct bspp_h264_pps_info *)(unit_data->out.pps_info->secure_pps_info);
+			/* FW PPS Data structure */
+			struct bspp_ddbuf_array_info *tmp = &unit_data->out.pps_info->fw_pps;
+			struct h264fw_picture_ps *h264fw_pps_info =
+				(struct h264fw_picture_ps *)((unsigned char *)
+						tmp->ddbuf_info.cpu_virt_addr + tmp->buf_offset);
+
+#ifdef DEBUG_DECODER_DRIVER
+			pr_info("Unit Parser: Found PICTURE_PARAMETER_SET NAL unit\n");
+#endif
+			VDEC_ASSERT(h264_pps_info);
+			VDEC_ASSERT(h264fw_pps_info);
+
+			/* Call PPS parser to populate the "Parse PPS Structure" */
+			unit_data->parse_error |=
+				bspp_h264_pps_parser(swsr_context, unit_data->str_res_handle,
+						     h264_pps_info);
+			/* From "Parse PPS Structure" populate the "FW PPS Data Structure"
+			 * - the scaling lists
+			 */
+			bspp_h264_fwpps_populate(h264_pps_info, h264fw_pps_info);
+			/* Set the PPS ID */
+			unit_data->out.pps_info->pps_id = h264_pps_info->pps_id;
+		}
+		break;
+
+	case BSPP_UNIT_PICTURE:
+		if (nal_unit_type == H264_NALTYPE_SLICE_PREFIX) {
+			if (bspp_h264_parse_mvc_slice_extension(swsr_context, interpicctx))
+				pr_err("%s: No MVC support\n", __func__);
+		} else if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+			nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE ||
+			nal_unit_type == H264_NALTYPE_SLICE ||
+			nal_unit_type == H264_NALTYPE_IDR_SLICE) {
+			struct bspp_h264_slice_hdr_info h264_slice_hdr_info;
+			struct bspp_h264_pps_info *h264_pps_info;
+			struct bspp_pps_info *pps_info;
+			struct h264fw_picture_ps *h264fw_pps_info;
+			struct h264fw_sequence_ps *h264_fwseq_hdr_info;
+			struct bspp_h264_seq_hdr_info *h264_seq_hdr_info;
+			struct bspp_sequence_hdr_info *sequ_hdr_info;
+			struct bspp_ddbuf_array_info *tmp1;
+			struct bspp_ddbuf_array_info *tmp2;
+			int current_pic_is_new = 0;
+			int determined = 0;
+			int id_loc;
+
+#ifdef DEBUG_DECODER_DRIVER
+			pr_info("Unit Parser: Found PICTURE DATA unit\n");
+#endif
+
+			unit_data->slice = 1;
+			unit_data->ext_slice = 0;
+
+			if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+			    nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE) {
+				pr_err("%s: No SVC support\n", __func__);
+			}
+
+			VDEC_ASSERT(unit_data->out.pict_hdr_info);
+			if (!unit_data->out.pict_hdr_info)
+				return IMG_ERROR_CANCELLED;
+
+			/* Default */
+			unit_data->out.pict_hdr_info->discontinuous_mbs = 0;
+
+			/*
+			 * Parse the Pic Header, return Parse SPS/PPS
+			 * structures
+			 */
+			parse_error = bspp_h264_pict_hdr_parser(swsr_context,
+								unit_data->str_res_handle,
+								&h264_slice_hdr_info,
+								&pps_info,
+								&sequ_hdr_info,
+								nal_unit_type,
+								nal_ref_idc);
+
+			if (parse_error) {
+				unit_data->parse_error |= parse_error;
+				return IMG_ERROR_CANCELLED;
+			}
+
+			/*
+			 * We are signalling a closed GOP at every I frame.
+			 * This does not conform 100% to the specification
+			 * but ensures that seeking always works.
+			 */
+			unit_data->new_closed_gop = h264_slice_hdr_info.slice_type ==
+				I_SLICE ? 1 : 0;
+
+			/*
+			 * Now pps_info and sequ_hdr_info contain the
+			 * PPS/SPS info related to this picture
+			 */
+			h264_pps_info = (struct bspp_h264_pps_info *)pps_info->secure_pps_info;
+			h264_seq_hdr_info =
+			(struct bspp_h264_seq_hdr_info *)sequ_hdr_info->secure_sequence_info;
+
+			tmp1 = &pps_info->fw_pps;
+			tmp2 = &sequ_hdr_info->fw_sequence;
+
+			h264fw_pps_info = (struct h264fw_picture_ps *)((unsigned char *)
+						tmp1->ddbuf_info.cpu_virt_addr + tmp1->buf_offset);
+			h264_fwseq_hdr_info = (struct h264fw_sequence_ps *)((unsigned char *)
+					tmp2->ddbuf_info.cpu_virt_addr + tmp2->buf_offset);
+			VDEC_ASSERT(h264_slice_hdr_info.pps_id == h264_pps_info->pps_id);
+			VDEC_ASSERT(h264_pps_info->seq_parameter_set_id ==
+				(unsigned int)h264_seq_hdr_info->sps_info.seq_parameter_set_id);
+
+			/*
+			 * Update the decoding-related FW SPS info for the current picture
+			 * with any SEI data that was received and that also relates to
+			 * the current info. Until we receive the picture we do not know
+			 * which sequence to update with the SEI data.
+			 * Set from the last SEI; needed for decoding.
+			 */
+			h264_fwseq_hdr_info->disable_vdmc_filt = interpicctx->disable_vdmc_filt;
+			h264_fwseq_hdr_info->transform4x4_mb_not_available =
+							interpicctx->b4x4transform_mb_unavailable;
+
+			/*
+			 * Determine if the current slice starts a new picture and update
+			 * the related params for future reference. The order of checks
+			 * is important (see H.264 spec 7.4.1.2.4).
+			 */
+			{
+				struct bspp_parse_state *state = unit_data->parse_state;
+
+				set_if_not_determined_yet(&determined, state->new_view,
+							  &current_pic_is_new, 1);
+				set_if_not_determined_yet(&determined, state->next_pic_is_new,
+							  &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 (h264_slice_hdr_info.redundant_pic_cnt > 0),
+					 &current_pic_is_new, 0);
+				set_if_not_determined_yet
+					(&determined,
+					 (state->prev_frame_num !=
+					  h264_slice_hdr_info.frame_num),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 (state->prev_pps_id != h264_slice_hdr_info.pps_id),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 (state->prev_field_pic_flag !=
+					  h264_slice_hdr_info.field_pic_flag),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 ((h264_slice_hdr_info.field_pic_flag) &&
+					  (state->prev_bottom_pic_flag !=
+					   h264_slice_hdr_info.bottom_field_flag)),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 ((state->prev_nal_ref_idc == 0 || nal_ref_idc == 0) &&
+					  (state->prev_nal_ref_idc != nal_ref_idc)),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 ((h264_seq_hdr_info->sps_info.pic_order_cnt_type == 0) &&
+					  ((state->prev_pic_order_cnt_lsb !=
+					    h264_slice_hdr_info.pic_order_cnt_lsb) ||
+					   (state->prev_delta_pic_order_cnt_bottom !=
+					    h264_slice_hdr_info.delta_pic_order_cnt_bottom))),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 ((h264_seq_hdr_info->sps_info.pic_order_cnt_type == 1) &&
+					  ((state->prev_delta_pic_order_cnt[0] !=
+					    h264_slice_hdr_info.delta_pic_order_cnt[0]) ||
+					   (state->prev_delta_pic_order_cnt[1] !=
+					    h264_slice_hdr_info.delta_pic_order_cnt[1]))),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet
+					(&determined,
+					 ((state->prev_nal_unit_type ==
+					   (int)H264_NALTYPE_IDR_SLICE ||
+					   nal_unit_type == (int)H264_NALTYPE_IDR_SLICE) &&
+					  (state->prev_nal_unit_type !=
+					   (int)nal_unit_type)),
+					 &current_pic_is_new, 1);
+				set_if_not_determined_yet(&determined,
+							  ((state->prev_nal_unit_type ==
+							    (int)H264_NALTYPE_IDR_SLICE) &&
+							   (state->prev_idr_pic_id !=
+							    h264_slice_hdr_info.idr_pic_id)),
+							  &current_pic_is_new, 1);
+
+				/*
+				 * Update whatever is not updated already in different places of
+				 * the code or just needs to be updated here
+				 */
+				state->prev_frame_num = h264_slice_hdr_info.frame_num;
+				state->prev_pps_id = h264_slice_hdr_info.pps_id;
+				state->prev_field_pic_flag =
+					h264_slice_hdr_info.field_pic_flag;
+				state->prev_nal_ref_idc = nal_ref_idc;
+				state->prev_pic_order_cnt_lsb =
+					h264_slice_hdr_info.pic_order_cnt_lsb;
+				state->prev_delta_pic_order_cnt_bottom =
+					h264_slice_hdr_info.delta_pic_order_cnt_bottom;
+				state->prev_delta_pic_order_cnt[0] =
+					h264_slice_hdr_info.delta_pic_order_cnt[0];
+				state->prev_delta_pic_order_cnt[1] =
+					h264_slice_hdr_info.delta_pic_order_cnt[1];
+				state->prev_nal_unit_type = (int)nal_unit_type;
+				state->prev_idr_pic_id = h264_slice_hdr_info.idr_pic_id;
+			}
+
+			/* Detect second field and manage the prev_bottom_pic_flag flag */
+			if (h264_slice_hdr_info.field_pic_flag && current_pic_is_new) {
+				unit_data->parse_state->prev_bottom_pic_flag =
+					h264_slice_hdr_info.bottom_field_flag;
+			}
+
+			/* Detect ASO (Arbitrary Slice Order); we just met a new picture */
+			id = h264_slice_hdr_info.colour_plane_id;
+			if (current_pic_is_new) {
+				unsigned int i;
+
+				for (i = 0; i < MAX_COMPONENTS; i++)
+					unit_data->parse_state->prev_first_mb_in_slice[i] = 0;
+			} else if (unit_data->parse_state->prev_first_mb_in_slice[id] >
+				h264_slice_hdr_info.first_mb_in_slice) {
+				/* We just found ASO */
+				unit_data->parse_state->discontinuous_mb = 1;
+			}
+			unit_data->parse_state->prev_first_mb_in_slice[id] =
+				h264_slice_hdr_info.first_mb_in_slice;
+
+			/* We may already know the MBs are discontinuous */
+			if (unit_data->parse_state->discontinuous_mb)
+				unit_data->out.pict_hdr_info->discontinuous_mbs =
+					unit_data->parse_state->discontinuous_mb;
+
+			/*
+			 * We want to calculate the scaling lists only once per picture/field,
+			 * not for every slice. Likewise we want to populate the VDEC Picture
+			 * Header Info only once per picture/field, not for every slice.
+			 */
+			if (current_pic_is_new) {
+				/* Common Sequence Header Info fetched */
+				struct vdec_comsequ_hdrinfo *comseq_hdr_info =
+					&sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info;
+				struct bspp_pict_data *type_pict_aux_data;
+
+				unit_data->parse_state->next_pic_is_new = 0;
+
+				/* Generate SGM for this picture */
+				if (h264_pps_info->num_slice_groups_minus1 != 0 &&
+				    h264_pps_info->slice_group_map_type <= 6) {
+					bspp_h264_unitparser_compile_sgmdata
+								(&h264_slice_hdr_info,
+								 h264_seq_hdr_info,
+								 h264_pps_info,
+								 unit_data->out.pict_hdr_info);
+				} else {
+					unit_data->out.pict_hdr_info->pict_sgm_data.pic_data = NULL;
+					unit_data->out.pict_hdr_info->pict_sgm_data.bufmap_id = 0;
+					unit_data->out.pict_hdr_info->pict_sgm_data.buf_offset = 0;
+					unit_data->out.pict_hdr_info->pict_sgm_data.id =
+						BSPP_INVALID;
+					unit_data->out.pict_hdr_info->pict_sgm_data.size = 0;
+				}
+
+				unit_data->parse_state->discontinuous_mb =
+					unit_data->out.pict_hdr_info->discontinuous_mbs;
+
+				/*
+				 * Select the scaling lists based on h264_pps_info and
+				 * h264_seq_hdr_info and pass them to h264fw_pps_info
+				 */
+				bspp_h264_select_scaling_list(h264fw_pps_info,
+							      h264_pps_info,
+							      h264_seq_hdr_info);
+
+				/*
+				 * Use the common sequence/single-slice info to populate the
+				 * VDEC Picture Header Info.
+				 */
+				bspp_h264_pict_hdr_populate(nal_unit_type, &h264_slice_hdr_info,
+							    comseq_hdr_info,
+							    unit_data->out.pict_hdr_info);
+
+				/* Store some raw bitstream fields for output. */
+				unit_data->out.pict_hdr_info->h264_pict_hdr_info.frame_num =
+					h264_slice_hdr_info.frame_num;
+				unit_data->out.pict_hdr_info->h264_pict_hdr_info.nal_ref_idc =
+					nal_ref_idc;
+
+				/*
+				 * Update the display-related picture header information with
+				 * the related parsed SEI data. The display-related SEI is
+				 * used only for the first picture after the SEI.
+				 */
+				if (!interpicctx->sei_info_attached_to_pic) {
+					interpicctx->sei_info_attached_to_pic = 1;
+					if (interpicctx->active_sps_for_sei_parsing !=
+						h264_seq_hdr_info->sps_info.seq_parameter_set_id) {
+						/*
+						 * We tried to guess the SPS ID that we should use
+						 * to parse the SEI, but we guessed wrong
+						 */
+						pr_err("Parsed SEI with wrong SPS, data may be parsed incorrectly\n");
+					}
+					unit_data->out.pict_hdr_info->disp_info.repeat_first_fld =
+						interpicctx->repeat_first_field;
+					unit_data->out.pict_hdr_info->disp_info.max_frm_repeat =
+						interpicctx->max_frm_repeat;
+					/* SEI - Not supported */
+				}
+
+				/*
+				 * For IDR slices update the active
+				 * sequence ID for SEI parsing
+				 * (error resilience).
+				 */
+				if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+					interpicctx->active_sps_for_sei_parsing =
+						h264_seq_hdr_info->sps_info.seq_parameter_set_id;
+
+				/*
+				 * Choose the appropriate auxiliary data
+				 * structure to populate.
+				 */
+				if (unit_data->parse_state->second_field_flag)
+					type_pict_aux_data =
+						&unit_data->out.pict_hdr_info->second_pict_aux_data;
+				else
+					type_pict_aux_data =
+						&unit_data->out.pict_hdr_info->pict_aux_data;
+
+				/*
+				 * We have no container for the PPS that
+				 * passes down to the kernel, for this
+				 * reason the h264 secure parser needs
+				 * to populate that info into the
+				 * picture header (Second)PictAuxData.
+				 */
+				type_pict_aux_data->bufmap_id = pps_info->bufmap_id;
+				type_pict_aux_data->buf_offset = pps_info->buf_offset;
+				type_pict_aux_data->pic_data = (void *)h264fw_pps_info;
+				type_pict_aux_data->id = h264_pps_info->pps_id;
+				type_pict_aux_data->size = sizeof(struct h264fw_picture_ps);
+
+				pps_info->ref_count++;
+
+				/* This info comes from NAL directly */
+				unit_data->out.pict_hdr_info->ref = (nal_ref_idc == 0) ? 0 : 1;
+			}
+			if (nal_unit_type == H264_NALTYPE_IDR_SLICE)
+				unit_data->new_closed_gop = 1;
+
+			/* Return the SPS ID */
+			/*
+			 * seq_parameter_set_id is always in the range 0-31,
+			 * so we can add an offset to mark subset sequence headers
+			 */
+			id_loc = h264_pps_info->seq_parameter_set_id;
+			unit_data->pict_sequ_hdr_id =
+				(nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+				nal_unit_type ==
+				H264_NALTYPE_SLICE_IDR_SCALABLE) ? id_loc + 32 : id_loc;
+		} else if (nal_unit_type == H264_NALTYPE_SLICE_PARTITION_A ||
+			nal_unit_type == H264_NALTYPE_SLICE_PARTITION_B ||
+			nal_unit_type == H264_NALTYPE_SLICE_PARTITION_C) {
+			unit_data->slice = 1;
+
+			pr_err("Unsupported Slice NAL type: %d\n", nal_unit_type);
+			unit_data->parse_error = BSPP_ERROR_UNSUPPORTED;
+		}
+		break;
+
+	case BSPP_UNIT_UNCLASSIFIED:
+		if (nal_unit_type == H264_NALTYPE_ACCESS_UNIT_DELIMITER) {
+			unit_data->parse_state->next_pic_is_new = 1;
+		} else if (nal_unit_type == H264_NALTYPE_SLICE_PREFIX ||
+			nal_unit_type == H264_NALTYPE_SUBSET_SPS) {
+			/* if MVC is disabled, do nothing */
+		} else {
+			/* Should not have any other type of unclassified data. */
+			pr_err("unclassified data detected!\n");
+		}
+		break;
+
+	case BSPP_UNIT_NON_PICTURE:
+		if (nal_unit_type == H264_NALTYPE_END_OF_SEQUENCE ||
+		    nal_unit_type == H264_NALTYPE_END_OF_STREAM) {
+			unit_data->parse_state->next_pic_is_new = 1;
+		} else if (nal_unit_type == H264_NALTYPE_FILLER_DATA ||
+			nal_unit_type == H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION ||
+			nal_unit_type == H264_NALTYPE_AUXILIARY_SLICE) {
+		} else if (nal_unit_type == H264_NALTYPE_SLICE_SCALABLE ||
+			nal_unit_type == H264_NALTYPE_SLICE_IDR_SCALABLE) {
+			/* if MVC is disabled, do nothing */
+		} else {
+			/* Should not have any other type of non-picture data. */
+			VDEC_ASSERT(0);
+		}
+		break;
+
+	case BSPP_UNIT_UNSUPPORTED:
+		pr_err("Unsupported NAL type: %d\n", nal_unit_type);
+		unit_data->parse_error = BSPP_ERROR_UNKNOWN_DATAUNIT_DETECTED;
+		break;
+
+	default:
+		VDEC_ASSERT(0);
+		break;
+	}
+
+	return result;
+}
+
+static int bspp_h264releasedata(void *str_alloc, enum bspp_unit_type data_type, void *data_handle)
+{
+	int result = 0;
+
+	if (!data_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	switch (data_type) {
+	case BSPP_UNIT_SEQUENCE:
+		result = bspp_h264_release_sequ_hdr_info(str_alloc, data_handle);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+static int bspp_h264resetdata(enum bspp_unit_type data_type, void *data_handle)
+{
+	int result = 0;
+
+	if (!data_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	switch (data_type) {
+	case BSPP_UNIT_SEQUENCE:
+		result = bspp_h264_reset_seq_hdr_info(data_handle);
+		break;
+	case BSPP_UNIT_PPS:
+		result = bspp_h264_reset_pps_info(data_handle);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+static void bspp_h264parse_codecconfig(void *swsr_ctx,
+				       unsigned int *unitcount,
+				       unsigned int *unit_arraycount,
+				       unsigned int *delimlength,
+				       unsigned int *size_delimlength)
+{
+	unsigned long long value = 6;
+
+	/*
+	 * Set the shift-register up to provide next 6 bytes
+	 * without emulation prevention detection.
+	 */
+	swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
+	/*
+	 * Codec config header must be read for size delimited data (H.264)
+	 * to get to the start of each unit.
+	 * This parsing follows section 5.2.4.1.1 of ISO/IEC 14496-15:2004(E).
+	 */
+	/* Configuration version. */
+	swsr_read_bits(swsr_ctx, 8);
+	/* AVC Profile Indication. */
+	swsr_read_bits(swsr_ctx, 8);
+	/* Profile compatibility. */
+	swsr_read_bits(swsr_ctx, 8);
+	/* AVC Level Indication. */
+	swsr_read_bits(swsr_ctx, 8);
+	*delimlength = ((swsr_read_bits(swsr_ctx, 8) & 0x3) + 1) * 8;
+	*unitcount = swsr_read_bits(swsr_ctx, 8) & 0x1f;
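+	/*
+	 * The upper bits of these two bytes are reserved ('111111' and
+	 * '111' respectively), hence the & 0x3 for lengthSizeMinusOne and
+	 * the & 0x1f for numOfSequenceParameterSets. For example, a
+	 * delimiter byte of 0xff gives (0xff & 0x3) + 1 = 4-byte NAL size
+	 * fields.
+	 */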
+
+	/* Size delimiter is only 2 bytes for H.264 codec configuration. */
+	*size_delimlength = 2 * 8;
+}
+
+static void bspp_h264update_unitcounts(void *swsr_ctx,
+				       unsigned int *unitcount,
+				       unsigned int *unit_arraycount)
+{
+	if (*unitcount == 0) {
+		unsigned long long value = 1;
+
+		/*
+		 * Set the shift-register up to provide next 1 byte without
+		 * emulation prevention detection.
+		 */
+		swsr_consume_delim(swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
+		*unitcount = swsr_read_bits(swsr_ctx, 8);
+	}
+
+	(*unitcount)--;
+}
+
+/*
+ * Sets the parser configuration
+ */
+int bspp_h264_set_parser_config(enum vdec_bstr_format bstr_format,
+				struct bspp_vid_std_features *pvidstd_features,
+				struct bspp_swsr_ctx *pswsr_ctx,
+				struct bspp_parser_callbacks *pparser_callbacks,
+				struct bspp_inter_pict_data *pinterpict_data)
+{
+	/* Set h.264 parser callbacks. */
+	pparser_callbacks->parse_unit_cb = bspp_h264_unit_parser;
+	pparser_callbacks->release_data_cb = bspp_h264releasedata;
+	pparser_callbacks->reset_data_cb = bspp_h264resetdata;
+	pparser_callbacks->destroy_data_cb = bspp_h264_destroy_data;
+	pparser_callbacks->parse_codec_config_cb = bspp_h264parse_codecconfig;
+	pparser_callbacks->update_unit_counts_cb = bspp_h264update_unitcounts;
+
+	/* Set h.264 specific features. */
+	pvidstd_features->seq_size = sizeof(struct bspp_h264_seq_hdr_info);
+	pvidstd_features->uses_pps = 1;
+	pvidstd_features->pps_size = sizeof(struct bspp_h264_pps_info);
+
+	/* Set h.264 specific shift register config. */
+	pswsr_ctx->emulation_prevention = SWSR_EMPREVENT_00000300;
+	pinterpict_data->h264_ctx.active_sps_for_sei_parsing = BSPP_INVALID;
+
+	if (bstr_format == VDEC_BSTRFORMAT_DEMUX_BYTESTREAM ||
+	    bstr_format == VDEC_BSTRFORMAT_ELEMENTARY) {
+		pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SCP;
+		pswsr_ctx->sr_config.delim_length = 3 * 8;
+		pswsr_ctx->sr_config.scp_value = 0x000001;
+	} else if (bstr_format == VDEC_BSTRFORMAT_DEMUX_SIZEDELIMITED) {
+		pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SIZE;
+		/* Set the default size-delimiter number of bits */
+		pswsr_ctx->sr_config.delim_length = 4 * 8;
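+		/*
+		 * The codec config header (bspp_h264parse_codecconfig above)
+		 * reports the stream's actual lengthSizeMinusOne, which is
+		 * then used in place of this default.
+		 */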
+	} else {
+		VDEC_ASSERT(0);
+		return IMG_ERROR_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
+
+/*
+ * This function determines the BSPP unit type based on the
+ * provided bitstream (H264 specific) unit type
+ */
+void bspp_h264_determine_unittype(unsigned char bitstream_unittype,
+				  int disable_mvc,
+				  enum bspp_unit_type *bspp_unittype)
+{
+	unsigned char type = bitstream_unittype & 0x1f;
+
+	switch (type) {
+	case H264_NALTYPE_SLICE_PREFIX:
+		*bspp_unittype = disable_mvc ? BSPP_UNIT_UNCLASSIFIED : BSPP_UNIT_PICTURE;
+		break;
+	case H264_NALTYPE_SUBSET_SPS:
+		*bspp_unittype = disable_mvc ? BSPP_UNIT_UNCLASSIFIED : BSPP_UNIT_SEQUENCE;
+		break;
+	case H264_NALTYPE_SLICE_SCALABLE:
+	case H264_NALTYPE_SLICE_IDR_SCALABLE:
+		*bspp_unittype = disable_mvc ? BSPP_UNIT_NON_PICTURE : BSPP_UNIT_PICTURE;
+		break;
+	case H264_NALTYPE_SEQUENCE_PARAMETER_SET:
+		*bspp_unittype = BSPP_UNIT_SEQUENCE;
+		break;
+	case H264_NALTYPE_PICTURE_PARAMETER_SET:
+		*bspp_unittype = BSPP_UNIT_PPS;
+		break;
+	case H264_NALTYPE_SLICE:
+	case H264_NALTYPE_SLICE_PARTITION_A:
+	case H264_NALTYPE_SLICE_PARTITION_B:
+	case H264_NALTYPE_SLICE_PARTITION_C:
+	case H264_NALTYPE_IDR_SLICE:
+		*bspp_unittype = BSPP_UNIT_PICTURE;
+		break;
+	case H264_NALTYPE_ACCESS_UNIT_DELIMITER:
+	case H264_NALTYPE_SUPPLEMENTAL_ENHANCEMENT_INFO:
+		/*
+		 * Each of these NAL units should not change unit type if
+		 * current is picture, since they can occur anywhere, any number
+		 * of times
+		 */
+		*bspp_unittype = BSPP_UNIT_UNCLASSIFIED;
+		break;
+	case H264_NALTYPE_END_OF_SEQUENCE:
+	case H264_NALTYPE_END_OF_STREAM:
+	case H264_NALTYPE_FILLER_DATA:
+	case H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION:
+	case H264_NALTYPE_AUXILIARY_SLICE:
+		*bspp_unittype = BSPP_UNIT_NON_PICTURE;
+		break;
+	default:
+		*bspp_unittype = BSPP_UNIT_UNSUPPORTED;
+		break;
+	}
+}
diff --git a/drivers/staging/media/vxd/decoder/h264_secure_parser.h b/drivers/staging/media/vxd/decoder/h264_secure_parser.h
new file mode 100644
index 000000000000..68789dfcc439
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/h264_secure_parser.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * h.264 secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Lakshmi Sankar <lakshmisankar-t@ti.com>
+ * Re-written for upstreaming
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __H264SECUREPARSER_H__
+#define __H264SECUREPARSER_H__
+
+#include "bspp_int.h"
+#include "vdec_defs.h"
+
+/*
+ * enum h264_nalunittype
+ * @Description Contains H264 NAL unit types
+ */
+enum h264_nalunittype {
+	H264_NALTYPE_UNSPECIFIED                      = 0,
+	H264_NALTYPE_SLICE                            = 1,
+	H264_NALTYPE_SLICE_PARTITION_A                = 2,
+	H264_NALTYPE_SLICE_PARTITION_B                = 3,
+	H264_NALTYPE_SLICE_PARTITION_C                = 4,
+	H264_NALTYPE_IDR_SLICE                        = 5,
+	H264_NALTYPE_SUPPLEMENTAL_ENHANCEMENT_INFO    = 6,
+	H264_NALTYPE_SEQUENCE_PARAMETER_SET           = 7,
+	H264_NALTYPE_PICTURE_PARAMETER_SET            = 8,
+	H264_NALTYPE_ACCESS_UNIT_DELIMITER            = 9,
+	H264_NALTYPE_END_OF_SEQUENCE                  = 10,
+	H264_NALTYPE_END_OF_STREAM                    = 11,
+	H264_NALTYPE_FILLER_DATA                      = 12,
+	H264_NALTYPE_SEQUENCE_PARAMETER_SET_EXTENSION = 13,
+	H264_NALTYPE_SLICE_PREFIX                     = 14,
+	H264_NALTYPE_SUBSET_SPS                       = 15,
+	H264_NALTYPE_AUXILIARY_SLICE                  = 19,
+	H264_NALTYPE_SLICE_SCALABLE                   = 20,
+	H264_NALTYPE_SLICE_IDR_SCALABLE               = 21,
+	H264_NALTYPE_MAX                              = 31,
+	H264_NALTYPE_FORCE32BITS                      = 0x7FFFFFFFU
+};
+
+/*
+ * struct bspp_h264_sps_info
+ * @Description	H264 SPS parsed information
+ */
+struct bspp_h264_sps_info {
+	unsigned int profile_idc;
+	unsigned int constraint_set_flags;
+	unsigned int level_idc;
+	unsigned char seq_parameter_set_id;
+	unsigned char chroma_format_idc;
+	int separate_colour_plane_flag;
+	unsigned int bit_depth_luma_minus8;
+	unsigned int bit_depth_chroma_minus8;
+	unsigned char qpprime_y_zero_transform_bypass_flag;
+	int seq_scaling_matrix_present_flag;
+	unsigned char seq_scaling_list_present_flag[12];
+	unsigned int log2_max_frame_num_minus4;
+	unsigned int pic_order_cnt_type;
+	unsigned int log2_max_pic_order_cnt_lsb_minus4;
+	int delta_pic_order_always_zero_flag;
+	int offset_for_non_ref_pic;
+	int offset_for_top_to_bottom_field;
+	unsigned int num_ref_frames_in_pic_order_cnt_cycle;
+	unsigned int *offset_for_ref_frame;
+	unsigned int max_num_ref_frames;
+	int gaps_in_frame_num_value_allowed_flag;
+	unsigned int pic_width_in_mbs_minus1;
+	unsigned int pic_height_in_map_units_minus1;
+	int frame_mbs_only_flag;
+	int mb_adaptive_frame_field_flag;
+	int direct_8x8_inference_flag;
+	int frame_cropping_flag;
+	unsigned int frame_crop_left_offset;
+	unsigned int frame_crop_right_offset;
+	unsigned int frame_crop_top_offset;
+	unsigned int frame_crop_bottom_offset;
+	int vui_parameters_present_flag;
+	/* mvc_vui_parameters_present_flag;   UNUSED */
+	int bmvcvuiparameterpresentflag;
+	/*
+	 * scaling lists are derived from both SPS and PPS information
+	 * but will change whenever the PPS changes
+	 * The derived set of tables are associated here with the PPS
+	 * NB: These are in H.264 order
+	 */
+	/* derived from SPS and PPS - 8 bit each */
+	unsigned char *scllst4x4seq;
+	/* derived from SPS and PPS - 8 bit each */
+	unsigned char *scllst8x8seq;
+	/* This is not direct parsed data, though it is extracted */
+	unsigned char usedefaultscalingmatrixflag_seq[12];
+};
+
+struct bspp_h264_hrdparam_info {
+	unsigned char cpb_cnt_minus1;
+	unsigned char bit_rate_scale;
+	unsigned char cpb_size_scale;
+	unsigned int *bit_rate_value_minus1;
+	unsigned int *cpb_size_value_minus1;
+	unsigned char *cbr_flag;
+	unsigned char initial_cpb_removal_delay_length_minus1;
+	unsigned char cpb_removal_delay_length_minus1;
+	unsigned char dpb_output_delay_length_minus1;
+	unsigned char time_offset_length;
+};
+
+struct bspp_h264_vui_info {
+	unsigned char aspect_ratio_info_present_flag;
+	unsigned int aspect_ratio_idc;
+	unsigned int sar_width;
+	unsigned int sar_height;
+	unsigned char overscan_info_present_flag;
+	unsigned char overscan_appropriate_flag;
+	unsigned char video_signal_type_present_flag;
+	unsigned int video_format;
+	unsigned char video_full_range_flag;
+	unsigned char colour_description_present_flag;
+	unsigned int colour_primaries;
+	unsigned int transfer_characteristics;
+	unsigned int matrix_coefficients;
+	unsigned char chroma_location_info_present_flag;
+	unsigned int chroma_sample_loc_type_top_field;
+	unsigned int chroma_sample_loc_type_bottom_field;
+	unsigned char timing_info_present_flag;
+	unsigned int num_units_in_tick;
+	unsigned int time_scale;
+	unsigned char fixed_frame_rate_flag;
+	unsigned char nal_hrd_parameters_present_flag;
+	struct bspp_h264_hrdparam_info nal_hrd_parameters;
+	unsigned char vcl_hrd_parameters_present_flag;
+	struct bspp_h264_hrdparam_info vcl_hrd_parameters;
+	unsigned char low_delay_hrd_flag;
+	unsigned char pic_struct_present_flag;
+	unsigned char bitstream_restriction_flag;
+	unsigned char motion_vectors_over_pic_boundaries_flag;
+	unsigned int max_bytes_per_pic_denom;
+	unsigned int max_bits_per_mb_denom;
+	unsigned int log2_max_mv_length_vertical;
+	unsigned int log2_max_mv_length_horizontal;
+	unsigned int num_reorder_frames;
+	unsigned int max_dec_frame_buffering;
+};
+
+/*
+ * struct bspp_h264_seq_hdr_info
+ * @Description	Contains everything parsed from the Sequence Header.
+ */
+struct bspp_h264_seq_hdr_info {
+	/* Video sequence header information */
+	struct bspp_h264_sps_info sps_info;
+	/* VUI sequence header information. */
+	struct bspp_h264_vui_info vui_info;
+};
+
+/**
+ * struct bspp_h264_ppssgm_info - This structure contains H264 PPS parse data.
+ * @slice_group_id: array of parsed slice_group_id[i] syntax elements
+ * @slicegroupidnum: number of entries in @slice_group_id
+ */
+struct bspp_h264_ppssgm_info {
+	unsigned char *slice_group_id;
+	unsigned short slicegroupidnum;
+};
+
+/*
+ * struct bspp_h264_pps_info
+ * @Description	This structure contains H264 PPS parse data.
+ */
+struct bspp_h264_pps_info {
+	/* pic_parameter_set_id: defines the PPS ID of the current PPS */
+	int pps_id;
+	/* seq_parameter_set_id: defines the SPS that current PPS points to */
+	int seq_parameter_set_id;
+	int entropy_coding_mode_flag;
+	int pic_order_present_flag;
+	unsigned char num_slice_groups_minus1;
+	unsigned char slice_group_map_type;
+	unsigned short run_length_minus1[8];
+	unsigned short top_left[8];
+	unsigned short bottom_right[8];
+	int slice_group_change_direction_flag;
+	unsigned short slice_group_change_rate_minus1;
+	unsigned short pic_size_in_map_unit;
+	struct bspp_h264_ppssgm_info h264_ppssgm_info;
+	unsigned char num_ref_idx_lx_active_minus1[H264FW_MAX_REFPIC_LISTS];
+	int weighted_pred_flag;
+	unsigned char weighted_bipred_idc;
+	int pic_init_qp_minus26;
+	int pic_init_qs_minus26;
+	int chroma_qp_index_offset;
+	int deblocking_filter_control_present_flag;
+	int constrained_intra_pred_flag;
+	int redundant_pic_cnt_present_flag;
+	int transform_8x8_mode_flag;
+	int pic_scaling_matrix_present_flag;
+	unsigned char pic_scaling_list_present_flag[12];
+	int second_chroma_qp_index_offset;
+
+	/*
+	 * scaling lists are derived from both SPS and PPS information
+	 * but will change whenever the PPS changes
+	 * The derived set of tables are associated here with the PPS
+	 * NB: These are in H.264 order
+	 */
+	/* derived from SPS and PPS - 8 bit each */
+	unsigned char *scllst4x4pic;
+	/* derived from SPS and PPS - 8 bit each */
+	unsigned char *scllst8x8pic;
+	/* This is not direct parsed data, though it is extracted */
+	unsigned char usedefaultscalingmatrixflag_pic[12];
+};
+
+/*
+ * enum bspp_h264_slice_type
+ * @Description	contains H264 slice types
+ */
+enum bspp_h264_slice_type {
+	P_SLICE = 0,
+	B_SLICE,
+	I_SLICE,
+	SP_SLICE,
+	SI_SLICE,
+	SLICE_TYPE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/*
+ * struct bspp_h264_slice_hdr_info
+ * @Description This structure contains H264 slice header information
+ */
+struct bspp_h264_slice_hdr_info {
+	unsigned short first_mb_in_slice;
+	enum bspp_h264_slice_type slice_type;
+
+	/* data to ID new picture */
+	unsigned int pps_id;
+	unsigned int frame_num;
+	unsigned char colour_plane_id;
+	unsigned char field_pic_flag;
+	unsigned char bottom_field_flag;
+	unsigned int idr_pic_id;
+	unsigned int pic_order_cnt_lsb;
+	int delta_pic_order_cnt_bottom;
+	int delta_pic_order_cnt[2];
+	unsigned int redundant_pic_cnt;
+
+	/* Fields we need to read out when parsing in secure mode */
+	unsigned char num_ref_idx_active_override_flag;
+	unsigned char num_ref_idx_lx_active_minus1[2];
+	unsigned short slice_group_change_cycle;
+};
+
+/*
+ * @Function	bspp_h264_set_parser_config
+ * @Description	Sets the parser configuration
+ */
+int bspp_h264_set_parser_config(enum vdec_bstr_format bstr_format,
+				struct bspp_vid_std_features *pvidstd_features,
+				struct bspp_swsr_ctx *pswsr_ctx,
+				struct bspp_parser_callbacks *pparser_callbacks,
+				struct bspp_inter_pict_data *pinterpict_data);
+
+/*
+ * @Function	bspp_h264_determine_unittype
+ * @Description	This function determines the BSPP unit type based on the
+ *		provided bitstream (H264 specific) unit type
+ */
+void bspp_h264_determine_unittype(unsigned char bitstream_unittype,
+				  int disable_mvc,
+				  enum bspp_unit_type *pbsppunittype);
+
+#endif /*__H264SECUREPARSER_H__ */
diff --git a/drivers/staging/media/vxd/decoder/hevc_secure_parser.c b/drivers/staging/media/vxd/decoder/hevc_secure_parser.c
new file mode 100644
index 000000000000..35fbd7155420
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/hevc_secure_parser.c
@@ -0,0 +1,2910 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * hevc secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ * Re-written for upstreaming
+ *	Prashanth Kumar Amai <prashanth.ka@pathpartnertech.com>
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "bspp_int.h"
+#include "hevc_secure_parser.h"
+#include "hevcfw_data.h"
+#include "pixel_api.h"
+#include "swsr.h"
+#include "vdec_defs.h"
+#include "vdecdd_utils.h"
+
+#if defined(DEBUG_DECODER_DRIVER)
+#define BSPP_HEVC_SYNTAX(fmt, ...)      pr_info("[hevc] " fmt, ## __VA_ARGS__)
+
+#else
+
+#define BSPP_HEVC_SYNTAX(fmt, ...)
+#endif
+
+static void HEVC_SWSR_U1(const char *what, unsigned char *where, void *swsr_ctx)
+{
+	*where = swsr_read_bits(swsr_ctx, 1);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, u(1) : %u", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_UN(const char *what, unsigned int *where,
+			 unsigned char numbits, void *swsr_ctx)
+{
+	*where = swsr_read_bits(swsr_ctx, numbits);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, u(%u) : %u", what, numbits, *where);
+#endif
+}
+
+static void HEVC_SWSR_UE(const char *what, unsigned int *where, void *swsr_ctx)
+{
+	*where = swsr_read_unsigned_expgoulomb(swsr_ctx);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, ue(v) : %u", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_SE(const char *what, int *where, void *swsr_ctx)
+{
+	*where = swsr_read_signed_expgoulomb(swsr_ctx);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, se(v) : %d", what, *where);
+#endif
+}
+
+static void HEVC_SWSR_FN(const char *what, unsigned char *where,
+			 unsigned char numbits, unsigned char pattern,
+			 enum bspp_error_type *bspperror, void *swsr_ctx)
+{
+	*where = swsr_read_bits(swsr_ctx, numbits);
+#ifdef DEBUG_DECODER_DRIVER
+	pr_info("%s, f(%u) : %u", what, numbits, *where);
+#endif
+	if (*where != pattern) {
+		*bspperror |= BSPP_ERROR_INVALID_VALUE;
+		pr_warn("Invalid value of %s (f(%u), expected: %u, got: %u)",
+			what, numbits, pattern, *where);
+	}
+}
+
+static void HEVC_UCHECK(const char *what, unsigned int val,
+			unsigned int expected,
+			enum bspp_error_type *bspperror)
+{
+	if (val != expected) {
+		*bspperror |= BSPP_ERROR_INVALID_VALUE;
+		pr_warn("Invalid value of %s (expected: %u, got: %u)",
+			what, expected, val);
+	}
+}
+
+static void HEVC_RANGEUCHECK(const char *what, unsigned int val,
+			     unsigned int min, unsigned int max,
+	enum bspp_error_type *bspperror)
+{
+	if ((min > 0 && val < min) || val > max) {
+		*bspperror |= BSPP_ERROR_INVALID_VALUE;
+		pr_warn("Value of %s out of range (expected: [%u, %u], got: %u)",
+			what, min, max, val);
+	}
+}
+
+static void HEVC_RANGESCHECK(const char *what, int val, int min, int max,
+			     enum bspp_error_type *bspperror)
+{
+	if (val < min || val > max) {
+		*bspperror |= BSPP_ERROR_INVALID_VALUE;
+		pr_warn("Value of %s out of range (expected: [%d, %d], got: %d)",
+			what, min, max, val);
+	}
+}
+
+#define HEVC_STATIC_ASSERT(expr) ((void)sizeof(unsigned char[1 - 2 * !(expr)]))
+
+#define HEVC_MIN(a, b, type) ({ \
+		type __a = a; \
+		type __b = b; \
+		(((__a) <= (__b)) ? (__a) : (__b)); })
+#define HEVC_MAX(a, b, type) ({ \
+		type __a = a; \
+		type __b = b; \
+		(((__a) >= (__b)) ? (__a) : (__b)); })
+#define HEVC_ALIGN(_val, _alignment, type) ({ \
+		type val = _val; \
+		type alignment = _alignment; \
+		(((val) + (alignment) - 1) & ~((alignment) - 1)); })
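+
+/*
+ * Implementation note: HEVC_MIN()/HEVC_MAX() copy their arguments into
+ * locals so each argument is evaluated only once, and HEVC_ALIGN()
+ * assumes a power-of-two alignment, e.g. HEVC_ALIGN(13, 8, unsigned int)
+ * rounds up to 16.
+ */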
+
+static const enum pixel_fmt_idc pixelformat_idc[] = {
+	PIXEL_FORMAT_MONO,
+	PIXEL_FORMAT_420,
+	PIXEL_FORMAT_422,
+	PIXEL_FORMAT_444
+};
+
+static enum bspp_error_type bspp_hevc_parse_vps(void *sr_ctx, struct bspp_hevc_vps *vps);
+
+static void bspp_hevc_sublayhrdparams(void *sr_ctx,
+				      struct bspp_hevc_hrd_parameters *hrdparams,
+				      unsigned char sublayer_id);
+
+static void bspp_hevc_parsehrdparams(void *sr_ctx,
+				     struct bspp_hevc_hrd_parameters *hrdparams,
+				     unsigned char common_infpresent,
+				     unsigned char max_numsublayers_minus1);
+
+static enum bspp_error_type bspp_hevc_parsesps(void *sr_ctx,
+					       void *str_res,
+					       struct bspp_hevc_sps *sps);
+
+static enum bspp_error_type bspp_hevc_parsepps(void *sr_ctx, void *str_res,
+					       struct bspp_hevc_pps *pps);
+
+static int bspp_hevc_reset_ppsinfo(void *secure_ppsinfo);
+
+static void bspp_hevc_dotilecalculations(struct bspp_hevc_sps *sps,
+					 struct bspp_hevc_pps *pps);
+
+static enum bspp_error_type bspp_hevc_parse_slicesegmentheader
+		(void *sr_ctx, void *str_res,
+		 struct bspp_hevc_slice_segment_header *ssh,
+		 unsigned char nalunit_type,
+		 struct bspp_vps_info **vpsinfo,
+		 struct bspp_sequence_hdr_info **spsinfo,
+		 struct bspp_pps_info **ppsinfo);
+
+static enum bspp_error_type bspp_hevc_parse_profiletierlevel
+			(void *sr_ctx,
+			 struct bspp_hevc_profile_tierlevel *ptl,
+			 unsigned char vps_maxsublayers_minus1);
+
+static void bspp_hevc_getdefault_scalinglist(unsigned char size_id, unsigned char matrix_id,
+					     const unsigned char **default_scalinglist,
+					     unsigned int *size);
+
+static enum bspp_error_type bspp_hevc_parse_scalinglistdata
+				(void *sr_ctx,
+				 struct bspp_hevc_scalinglist_data *scaling_listdata);
+
+static void bspp_hevc_usedefault_scalinglists(struct bspp_hevc_scalinglist_data *scaling_listdata);
+
+static enum bspp_error_type bspp_hevc_parse_shortterm_refpicset
+		(void *sr_ctx,
+		 struct bspp_hevc_shortterm_refpicset *st_refpicset,
+		 unsigned char st_rps_idx,
+		 unsigned char in_slice_header);
+
+static void bspp_hevc_fillcommonseqhdr(struct bspp_hevc_sps *sps,
+				       struct vdec_comsequ_hdrinfo *common_seq);
+
+static void bspp_hevc_fillpicturehdr(struct vdec_comsequ_hdrinfo *common_seq,
+				     enum hevc_nalunittype nalunit_type,
+				     struct bspp_pict_hdr_info *picture_hdr,
+				     struct bspp_hevc_sps *sps,
+				     struct bspp_hevc_pps *pps,
+				     struct bspp_hevc_vps *vps);
+
+static void bspp_hevc_fill_fwsps(struct bspp_hevc_sps *sps,
+				 struct hevcfw_sequence_ps *fwsps);
+
+static void bspp_hevc_fill_fwst_rps(struct bspp_hevc_shortterm_refpicset *strps,
+				    struct hevcfw_short_term_ref_picset *fwstrps);
+
+static void bspp_hevc_fill_fwpps(struct bspp_hevc_pps *pps,
+				 struct hevcfw_picture_ps *fw_pps);
+
+static void bspp_hevc_fill_fw_scaling_lists(struct bspp_hevc_pps *pps,
+					    struct bspp_hevc_sps *sps,
+					    struct hevcfw_picture_ps *fw_pps);
+
+static unsigned int bspp_ceil_log2(unsigned int linear_val);
+
+static unsigned char bspp_hevc_picture_is_irap(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_cra(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_idr(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_is_bla(enum hevc_nalunittype nalunit_type);
+
+static unsigned char bspp_hevc_picture_getnorasl_outputflag
+						(enum hevc_nalunittype nalunit_type,
+						 struct bspp_hevc_inter_pict_ctx *inter_pict_ctx);
+
+static unsigned char bspp_hevc_range_extensions_is_enabled
+					(struct bspp_hevc_profile_tierlevel *profile_tierlevel);
+
+static int bspp_hevc_unitparser(void *swsr_ctx, struct bspp_unit_data *unitdata)
+{
+	void *sr_ctx = swsr_ctx;
+	int result = 0;
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	struct bspp_inter_pict_data *inter_pict_ctx =
+				unitdata->parse_state->inter_pict_ctx;
+	unsigned char forbidden_zero_bit = 0;
+	unsigned int nal_unit_type = 0;
+	unsigned char nuh_layer_id = 0;
+	unsigned int nuh_temporal_id_plus1 = 0;
+
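+	/*
+	 * Parse the two-byte HEVC NAL unit header (ITU-T H.265 7.3.1.2):
+	 * forbidden_zero_bit (1), nal_unit_type (6), nuh_layer_id (6) and
+	 * nuh_temporal_id_plus1 (3).
+	 */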
+	HEVC_SWSR_FN("forbidden_zero_bit", &forbidden_zero_bit, 1, 0, &parse_err, sr_ctx);
+	HEVC_SWSR_UN("nal_unit_type", &nal_unit_type, 6, sr_ctx);
+	/* for current version of HEVC nuh_layer_id "shall be equal to 0" */
+	HEVC_SWSR_FN("nuh_layer_id", &nuh_layer_id, 6, 0, &parse_err, sr_ctx);
+	HEVC_SWSR_UN("nuh_temporal_id_plus1", &nuh_temporal_id_plus1, 3, sr_ctx);
+
+	switch (unitdata->unit_type) {
+	case BSPP_UNIT_VPS:
+	{
+		struct bspp_hevc_vps *vps =
+			(struct bspp_hevc_vps *)unitdata->out.vps_info->secure_vpsinfo;
+
+		unitdata->parse_error |= bspp_hevc_parse_vps(sr_ctx, vps);
+		unitdata->out.vps_info->vps_id =
+			vps->vps_video_parameter_set_id;
+	}
+	break;
+
+	case BSPP_UNIT_SEQUENCE:
+	{
+		struct bspp_ddbuf_array_info *tmp;
+		struct hevcfw_sequence_ps *fwsps;
+		struct vdec_comsequ_hdrinfo *common_seq;
+		struct bspp_hevc_sps *sps =
+			(struct bspp_hevc_sps *)unitdata->out.sequ_hdr_info->secure_sequence_info;
+
+		unitdata->parse_error |= bspp_hevc_parsesps(sr_ctx,
+				unitdata->str_res_handle,
+				sps);
+		unitdata->out.sequ_hdr_info->sequ_hdr_info.sequ_hdr_id =
+			sps->sps_seq_parameter_set_id;
+
+		tmp = &unitdata->out.sequ_hdr_info->fw_sequence;
+		/* handle firmware headers */
+		fwsps =
+		(struct hevcfw_sequence_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr +
+			tmp->buf_offset);
+
+		bspp_hevc_fill_fwsps(sps, fwsps);
+
+		/* handle common sequence header */
+		common_seq =
+			&unitdata->out.sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info;
+
+		bspp_hevc_fillcommonseqhdr(sps, common_seq);
+	}
+	break;
+
+	case BSPP_UNIT_PPS:
+	{
+		struct bspp_ddbuf_array_info *tmp;
+		struct hevcfw_picture_ps *fw_pps;
+		struct bspp_hevc_pps *pps =
+			(struct bspp_hevc_pps *)unitdata->out.pps_info->secure_pps_info;
+
+		unitdata->parse_error |= bspp_hevc_parsepps(sr_ctx,
+				unitdata->str_res_handle,
+				pps);
+		unitdata->out.pps_info->pps_id = pps->pps_pic_parameter_set_id;
+
+		tmp = &unitdata->out.pps_info->fw_pps;
+		/* handle firmware headers */
+		fw_pps =
+		(struct hevcfw_picture_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr +
+			tmp->buf_offset);
+		bspp_hevc_fill_fwpps(pps, fw_pps);
+	}
+	break;
+
+	case BSPP_UNIT_PICTURE:
+	{
+		struct bspp_hevc_slice_segment_header ssh;
+		struct bspp_vps_info *vps_info = NULL;
+		struct bspp_sequence_hdr_info *sequ_hdr_info = NULL;
+		struct bspp_hevc_sps *hevc_sps = NULL;
+		struct bspp_pps_info *ppsinfo = NULL;
+		enum bspp_error_type parse_error;
+		struct bspp_ddbuf_array_info *tmp;
+		struct hevcfw_picture_ps *fw_pps;
+		struct bspp_pict_data *pictdata;
+		struct bspp_hevc_pps *pps;
+
+		/*
+		 * EOS has to be attached to picture data, so it can be used
+		 * for NoRaslOutputFlag calculation in FW
+		 */
+		inter_pict_ctx->hevc_ctx.eos_detected = 0;
+		if (nal_unit_type == HEVC_NALTYPE_EOS) {
+			inter_pict_ctx->hevc_ctx.eos_detected = 1;
+			break;
+		}
+
+		parse_error = bspp_hevc_parse_slicesegmentheader(sr_ctx,
+								 unitdata->str_res_handle,
+								 &ssh,
+								 nal_unit_type,
+								 &vps_info,
+								 &sequ_hdr_info,
+								 &ppsinfo);
+		unitdata->parse_error |= parse_error;
+		unitdata->slice = 1;
+
+		if (parse_error != BSPP_ERROR_NONE &&
+		    parse_error != BSPP_ERROR_CORRECTION_VALIDVALUE) {
+			result = IMG_ERROR_CANCELLED;
+			break;
+		}
+
+		/* If we just started a new picture. */
+		if (ssh.first_slice_segment_in_pic_flag) {
+			tmp = &ppsinfo->fw_pps;
+			/* handle firmware headers */
+			fw_pps =
+			(struct hevcfw_picture_ps *)((unsigned char *)tmp->ddbuf_info.cpu_virt_addr
+				+ tmp->buf_offset);
+
+			inter_pict_ctx->hevc_ctx.first_after_eos = 0;
+			if (inter_pict_ctx->hevc_ctx.eos_detected) {
+				inter_pict_ctx->hevc_ctx.first_after_eos = 1;
+				inter_pict_ctx->hevc_ctx.eos_detected = 0;
+			}
+
+			/* fill common picture header */
+			bspp_hevc_fillpicturehdr(&sequ_hdr_info->sequ_hdr_info.com_sequ_hdr_info,
+						 (enum hevc_nalunittype)nal_unit_type,
+						 unitdata->out.pict_hdr_info,
+						 (struct bspp_hevc_sps *)
+						 sequ_hdr_info->secure_sequence_info,
+						 (struct bspp_hevc_pps *)ppsinfo->secure_pps_info,
+						 (struct bspp_hevc_vps *)vps_info->secure_vpsinfo);
+
+			bspp_hevc_fill_fw_scaling_lists(ppsinfo->secure_pps_info,
+							sequ_hdr_info->secure_sequence_info,
+							fw_pps);
+
+			pictdata = &unitdata->out.pict_hdr_info->pict_aux_data;
+			/*
+			 * We have no container for the PPS that passes down
+			 * to the kernel, for this reason the hevc secure parser
+			 * needs to populate that info into the picture
+			 * header PictAuxData.
+			 */
+			pictdata->bufmap_id = ppsinfo->bufmap_id;
+			pictdata->buf_offset = ppsinfo->buf_offset;
+			pictdata->pic_data = fw_pps;
+			pictdata->id = fw_pps->pps_pic_parameter_set_id;
+			pictdata->size = sizeof(*fw_pps);
+
+			ppsinfo->ref_count++;
+
+			/* new Coded Video Sequence indication */
+			if (nal_unit_type == HEVC_NALTYPE_IDR_W_RADL ||
+			    nal_unit_type == HEVC_NALTYPE_IDR_N_LP ||
+			    nal_unit_type == HEVC_NALTYPE_BLA_N_LP ||
+			    nal_unit_type == HEVC_NALTYPE_BLA_W_RADL ||
+			    nal_unit_type == HEVC_NALTYPE_BLA_W_LP ||
+			    nal_unit_type == HEVC_NALTYPE_CRA) {
+				unitdata->new_closed_gop = 1;
+				inter_pict_ctx->hevc_ctx.seq_pic_count = 0;
+			}
+
+			/* Attach SEI data to the picture. */
+			if (!inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic) {
+				struct bspp_pict_hdr_info *pict_hdr =
+					unitdata->out.pict_hdr_info;
+
+				/*
+				 * If there is already a non-empty SEI list
+				 * available, attach it to the picture header.
+				 */
+				if (inter_pict_ctx->hevc_ctx.sei_rawdata_list) {
+					pict_hdr->hevc_pict_hdr_info.raw_sei_datalist_firstfield =
+						(void *)inter_pict_ctx->hevc_ctx.sei_rawdata_list;
+					inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 1;
+				} else {
+					/*
+					 * Otherwise expose a handle to a
+					 * picture header field so the SEI
+					 * list can be attached later.
+					 */
+					inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list =
+					&pict_hdr->hevc_pict_hdr_info.raw_sei_datalist_firstfield;
+				}
+			}
+
+			/* Attach raw VUI data to the picture header. */
+			hevc_sps = (struct bspp_hevc_sps *)sequ_hdr_info->secure_sequence_info;
+			if (hevc_sps->vui_raw_data) {
+				hevc_sps->vui_raw_data->ref_count++;
+				unitdata->out.pict_hdr_info->hevc_pict_hdr_info.raw_vui_data =
+					(void *)hevc_sps->vui_raw_data;
+			}
+
+			inter_pict_ctx->hevc_ctx.seq_pic_count++;
+
+			/* NoOutputOfPriorPicsFlag */
+			inter_pict_ctx->not_dpb_flush = 0;
+			if (unitdata->new_closed_gop &&
+			    bspp_hevc_picture_is_irap((enum hevc_nalunittype)nal_unit_type) &&
+			    bspp_hevc_picture_getnorasl_outputflag((enum hevc_nalunittype)
+								   nal_unit_type,
+								   &inter_pict_ctx->hevc_ctx)) {
+				if (bspp_hevc_picture_is_cra((enum hevc_nalunittype)nal_unit_type))
+					inter_pict_ctx->not_dpb_flush = 1;
+				else
+					inter_pict_ctx->not_dpb_flush =
+						ssh.no_output_of_prior_pics_flag;
+			}
+
+			unitdata->parse_state->next_pic_is_new = 0;
+		}
+
+		pps = (struct bspp_hevc_pps *)ppsinfo->secure_pps_info;
+		unitdata->pict_sequ_hdr_id = pps->pps_seq_parameter_set_id;
+	}
+	break;
+
+	case BSPP_UNIT_UNCLASSIFIED:
+	case BSPP_UNIT_NON_PICTURE:
+	case BSPP_UNIT_UNSUPPORTED:
+		break;
+
+	default:
+		VDEC_ASSERT("Unknown BSPP Unit Type" == NULL);
+		break;
+	}
+
+	return result;
+}
+
+static void bspp_hevc_initialiseparsing(struct bspp_parse_state *parse_state)
+{
+	/* Indicate that SEI info has not yet been attached to this picture. */
+	parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 0;
+}
+
+static void bspp_hevc_finaliseparsing(void *str_alloc, struct bspp_parse_state *parse_state)
+{
+	/*
+	 * If SEI info has not yet been attached to the picture and
+	 * there is anything to be attached.
+	 */
+	if (!parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic &&
+	    parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list) {
+		/* attach the SEI list if there is a handle provided for that. */
+		if (parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list) {
+			/* Attach the raw SEI list to the picture. */
+			*parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list =
+				(void *)parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list;
+			/* Reset the inter-picture data. */
+			parse_state->inter_pict_ctx->hevc_ctx.hndl_pichdr_sei_rawdata_list = NULL;
+		} else {
+			/* Nowhere to attach the raw SEI list, so just free it. */
+			bspp_freeraw_sei_datalist
+				(str_alloc, parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list);
+		}
+	}
+
+	/* Indicate that SEI info has been attached to the picture. */
+	parse_state->inter_pict_ctx->hevc_ctx.sei_info_attached_to_pic = 1;
+	/* Reset the inter-picture SEI list. */
+	parse_state->inter_pict_ctx->hevc_ctx.sei_rawdata_list = NULL;
+}
+
+static enum bspp_error_type bspp_hevc_parse_vps(void *sr_ctx, struct bspp_hevc_vps *vps)
+{
+	unsigned int parse_err = BSPP_ERROR_NONE;
+	unsigned int i, j;
+
+	VDEC_ASSERT(vps);
+	VDEC_ASSERT(sr_ctx);
+
+	memset(vps, 0, sizeof(struct bspp_hevc_vps));
+
+	HEVC_SWSR_UN("vps_video_parameter_set_id",
+		     (unsigned int *)&vps->vps_video_parameter_set_id, 4, sr_ctx);
+	HEVC_SWSR_UN("vps_reserved_three_2bits",
+		     (unsigned int *)&vps->vps_reserved_three_2bits, 2, sr_ctx);
+	HEVC_SWSR_UN("vps_max_layers_minus1",
+		     (unsigned int *)&vps->vps_max_layers_minus1, 6, sr_ctx);
+	HEVC_SWSR_UN("vps_max_sub_layers_minus1",
+		     (unsigned int *)&vps->vps_max_sub_layers_minus1, 3, sr_ctx);
+	HEVC_RANGEUCHECK("vps_max_sub_layers_minus1", vps->vps_max_sub_layers_minus1, 0,
+			 HEVC_MAX_NUM_SUBLAYERS - 1, &parse_err);
+	HEVC_SWSR_U1("vps_temporal_id_nesting_flag",
+		     &vps->vps_temporal_id_nesting_flag, sr_ctx);
+	HEVC_SWSR_UN("vps_reserved_0xffff_16bits",
+		     (unsigned int *)&vps->vps_reserved_0xffff_16bits, 16, sr_ctx);
+
+	if (vps->vps_max_sub_layers_minus1 == 0)
+		HEVC_UCHECK("vps_temporal_id_nesting_flag",
+			    vps->vps_temporal_id_nesting_flag, 1, &parse_err);
+
+	parse_err |= bspp_hevc_parse_profiletierlevel(sr_ctx, &vps->profiletierlevel,
+						      vps->vps_max_sub_layers_minus1);
+
+	HEVC_SWSR_U1("vps_sub_layer_ordering_info_present_flag",
+		     &vps->vps_sub_layer_ordering_info_present_flag, sr_ctx);
+	for (i = vps->vps_sub_layer_ordering_info_present_flag ?
+		0 : vps->vps_max_sub_layers_minus1;
+		i <= vps->vps_max_sub_layers_minus1; ++i) {
+		HEVC_SWSR_UE("vps_max_dec_pic_buffering_minus1",
+			     (unsigned int *)&vps->vps_max_dec_pic_buffering_minus1[i], sr_ctx);
+		HEVC_SWSR_UE("vps_max_num_reorder_pics",
+			     (unsigned int *)&vps->vps_max_num_reorder_pics[i], sr_ctx);
+		HEVC_SWSR_UE("vps_max_latency_increase_plus1",
+			     (unsigned int *)&vps->vps_max_latency_increase_plus1[i], sr_ctx);
+	}
+
+	HEVC_SWSR_UN("vps_max_layer_id", (unsigned int *)&vps->vps_max_layer_id, 6, sr_ctx);
+	HEVC_SWSR_UE("vps_num_layer_sets_minus1",
+		     (unsigned int *)&vps->vps_num_layer_sets_minus1, sr_ctx);
+
+	for (i = 1; i <= vps->vps_num_layer_sets_minus1; ++i) {
+		for (j = 0; j <= vps->vps_max_layer_id; ++j) {
+			HEVC_SWSR_U1("layer_id_included_flag",
+				     &vps->layer_id_included_flag[i][j], sr_ctx);
+		}
+	}
+
+	HEVC_SWSR_U1("vps_timing_info_present_flag", &vps->vps_timing_info_present_flag, sr_ctx);
+	if (vps->vps_timing_info_present_flag) {
+		HEVC_SWSR_UN("vps_num_units_in_tick",
+			     (unsigned int *)&vps->vps_num_units_in_tick, 32, sr_ctx);
+		HEVC_SWSR_UN("vps_time_scale",
+			     (unsigned int *)&vps->vps_time_scale, 32, sr_ctx);
+		HEVC_SWSR_U1("vps_poc_proportional_to_timing_flag",
+			     &vps->vps_poc_proportional_to_timing_flag, sr_ctx);
+		if (vps->vps_poc_proportional_to_timing_flag)
+			HEVC_SWSR_UE("vps_num_ticks_poc_diff_one_minus1",
+				     (unsigned int *)&vps->vps_num_ticks_poc_diff_one_minus1,
+				     sr_ctx);
+
+		HEVC_SWSR_UE("vps_num_hrd_parameters",
+			     (unsigned int *)&vps->vps_num_hrd_parameters, sr_ctx);
+
+		/* consume hrd_parameters */
+		for (i = 0; i < vps->vps_num_hrd_parameters; i++) {
+			unsigned int hrd_layer_set_idx;
+			unsigned char cprms_present_flag = 1;
+			struct bspp_hevc_hrd_parameters hrdparams;
+
+			HEVC_SWSR_UE("hrd_layer_set_idx",
+				     &hrd_layer_set_idx, sr_ctx);
+			if (i > 0)
+				HEVC_SWSR_U1("cprms_present_flag", &cprms_present_flag, sr_ctx);
+
+			bspp_hevc_parsehrdparams(sr_ctx, &hrdparams,
+						 cprms_present_flag,
+						 vps->vps_max_sub_layers_minus1);
+		}
+	}
+	HEVC_SWSR_U1("vps_extension_flag", &vps->vps_extension_flag, sr_ctx);
+
+	return (enum bspp_error_type)parse_err;
+}
+
+static void bspp_hevc_sublayhrdparams(void *sr_ctx,
+				      struct bspp_hevc_hrd_parameters *hrdparams,
+				      unsigned char sublayer_id)
+{
+	unsigned char i;
+	unsigned char cpb_cnt = hrdparams->cpb_cnt_minus1[sublayer_id];
+	struct bspp_hevc_sublayer_hrd_parameters *sublay_hrdparams =
+		&hrdparams->sublayhrdparams[sublayer_id];
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(hrdparams);
+	VDEC_ASSERT(cpb_cnt < HEVC_MAX_CPB_COUNT);
+	VDEC_ASSERT(sublayer_id < HEVC_MAX_NUM_SUBLAYERS);
+
+	for (i = 0; i <= cpb_cnt; i++) {
+		HEVC_SWSR_UE("bit_rate_value_minus1",
+			     (unsigned int *)&sublay_hrdparams->bit_rate_value_minus1[i], sr_ctx);
+		HEVC_SWSR_UE("cpb_size_value_minus1",
+			     (unsigned int *)&sublay_hrdparams->cpb_size_value_minus1[i], sr_ctx);
+		if (hrdparams->sub_pic_hrd_params_present_flag) {
+			HEVC_SWSR_UE("cpb_size_du_value_minus1",
+				     (unsigned int *)
+				     &sublay_hrdparams->cpb_size_du_value_minus1[i],
+				     sr_ctx);
+			HEVC_SWSR_UE("bit_rate_du_value_minus1",
+				     (unsigned int *)
+				     &sublay_hrdparams->bit_rate_du_value_minus1[i],
+				     sr_ctx);
+		}
+		HEVC_SWSR_U1("cbr_flag", &sublay_hrdparams->cbr_flag[i], sr_ctx);
+	}
+}
+
+static void bspp_hevc_parsehrdparams(void *sr_ctx,
+				     struct bspp_hevc_hrd_parameters *hrdparams,
+				     unsigned char common_infpresent,
+				     unsigned char max_numsublayers_minus1)
+{
+	unsigned char i;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(hrdparams);
+	VDEC_ASSERT(max_numsublayers_minus1 < HEVC_MAX_NUM_SUBLAYERS);
+
+	memset(hrdparams, 0, sizeof(struct bspp_hevc_hrd_parameters));
+
+	if (common_infpresent) {
+		HEVC_SWSR_U1("nal_hrd_parameters_present_flag",
+			     &hrdparams->nal_hrd_parameters_present_flag, sr_ctx);
+		HEVC_SWSR_U1("vcl_hrd_parameters_present_flag",
+			     &hrdparams->vcl_hrd_parameters_present_flag, sr_ctx);
+		if (hrdparams->nal_hrd_parameters_present_flag ||
+		    hrdparams->vcl_hrd_parameters_present_flag) {
+			HEVC_SWSR_U1("sub_pic_hrd_params_present_flag",
+				     &hrdparams->sub_pic_hrd_params_present_flag,
+				     sr_ctx);
+			if (hrdparams->sub_pic_hrd_params_present_flag) {
+				HEVC_SWSR_UN("tick_divisor_minus2",
+					     (unsigned int *)&hrdparams->tick_divisor_minus2,
+					     8, sr_ctx);
+				HEVC_SWSR_UN
+				("du_cpb_removal_delay_increment_length_minus1",
+				 (unsigned int *)
+				 &hrdparams->du_cpb_removal_delay_increment_length_minus1,
+				 5, sr_ctx);
+				HEVC_SWSR_U1("sub_pic_cpb_params_in_pic_timing_sei_flag",
+					     &hrdparams->sub_pic_cpb_params_in_pic_timing_sei_flag,
+					     sr_ctx);
+				HEVC_SWSR_UN("dpb_output_delay_du_length_minus1",
+					     (unsigned int *)
+					     &hrdparams->dpb_output_delay_du_length_minus1,
+					     5, sr_ctx);
+			}
+			HEVC_SWSR_UN("bit_rate_scale",
+				     (unsigned int *)&hrdparams->bit_rate_scale, 4, sr_ctx);
+			HEVC_SWSR_UN("cpb_size_scale",
+				     (unsigned int *)&hrdparams->cpb_size_scale, 4, sr_ctx);
+			if (hrdparams->sub_pic_hrd_params_present_flag)
+				HEVC_SWSR_UN("cpb_size_du_scale",
+					     (unsigned int *)&hrdparams->cpb_size_du_scale,
+					     4, sr_ctx);
+
+			HEVC_SWSR_UN("initial_cpb_removal_delay_length_minus1",
+				     (unsigned int *)
+				     &hrdparams->initial_cpb_removal_delay_length_minus1,
+				     5, sr_ctx);
+			HEVC_SWSR_UN("au_cpb_removal_delay_length_minus1",
+				     (unsigned int *)&hrdparams->au_cpb_removal_delay_length_minus1,
+				     5, sr_ctx);
+			HEVC_SWSR_UN("dpb_output_delay_length_minus1",
+				     (unsigned int *)&hrdparams->dpb_output_delay_length_minus1,
+				     5, sr_ctx);
+		}
+	}
+	for (i = 0; i <= max_numsublayers_minus1; i++) {
+		HEVC_SWSR_U1("fixed_pic_rate_general_flag",
+			     &hrdparams->fixed_pic_rate_general_flag[i], sr_ctx);
+		hrdparams->fixed_pic_rate_within_cvs_flag[i] =
+			hrdparams->fixed_pic_rate_general_flag[i];
+		if (!hrdparams->fixed_pic_rate_general_flag[i])
+			HEVC_SWSR_U1("fixed_pic_rate_within_cvs_flag",
+				     &hrdparams->fixed_pic_rate_within_cvs_flag[i],
+				     sr_ctx);
+
+		if (hrdparams->fixed_pic_rate_within_cvs_flag[i])
+			HEVC_SWSR_UE("elemental_duration_in_tc_minus1",
+				     (unsigned int *)&hrdparams->elemental_duration_in_tc_minus1[i],
+				     sr_ctx);
+		else
+			HEVC_SWSR_U1("low_delay_hrd_flag",
+				     &hrdparams->low_delay_hrd_flag[i], sr_ctx);
+
+		if (!hrdparams->low_delay_hrd_flag[i])
+			HEVC_SWSR_UE("cpb_cnt_minus1",
+				     (unsigned int *)&hrdparams->cpb_cnt_minus1[i], sr_ctx);
+
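+		/*
+		 * NAL and VCL sub-layer HRD parameters are both consumed
+		 * from the bitstream; they share storage here, so when both
+		 * are present the VCL set overwrites the NAL set.
+		 */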
+		if (hrdparams->nal_hrd_parameters_present_flag)
+			bspp_hevc_sublayhrdparams(sr_ctx, hrdparams, i);
+
+		if (hrdparams->vcl_hrd_parameters_present_flag)
+			bspp_hevc_sublayhrdparams(sr_ctx, hrdparams, i);
+	}
+}
+
+static enum bspp_error_type bspp_hevc_parsevui_parameters
+			(void *sr_ctx,
+			 struct bspp_hevc_vui_params *vui_params,
+			 unsigned char sps_max_sub_layers_minus1)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(vui_params);
+
+	memset(vui_params, 0, sizeof(struct bspp_hevc_vui_params));
+
+	HEVC_SWSR_U1("aspect_ratio_info_present_flag",
+		     &vui_params->aspect_ratio_info_present_flag, sr_ctx);
+	if (vui_params->aspect_ratio_info_present_flag) {
+		HEVC_SWSR_UN("aspect_ratio_idc",
+			     (unsigned int *)&vui_params->aspect_ratio_idc, 8, sr_ctx);
+		if (vui_params->aspect_ratio_idc == HEVC_EXTENDED_SAR) {
+			HEVC_SWSR_UN("sar_width",
+				     (unsigned int *)&vui_params->sar_width, 16, sr_ctx);
+			HEVC_SWSR_UN("sar_height",
+				     (unsigned int *)&vui_params->sar_height, 16, sr_ctx);
+		}
+	}
+	HEVC_SWSR_U1("overscan_info_present_flag",
+		     &vui_params->overscan_info_present_flag, sr_ctx);
+
+	if (vui_params->overscan_info_present_flag)
+		HEVC_SWSR_U1("overscan_appropriate_flag",
+			     &vui_params->overscan_appropriate_flag, sr_ctx);
+
+	HEVC_SWSR_U1("video_signal_type_present_flag",
+		     &vui_params->video_signal_type_present_flag, sr_ctx);
+
+	if (vui_params->video_signal_type_present_flag) {
+		HEVC_SWSR_UN("video_format",
+			     (unsigned int *)&vui_params->video_format, 3, sr_ctx);
+		HEVC_SWSR_U1("video_full_range_flag",
+			     &vui_params->video_full_range_flag, sr_ctx);
+		HEVC_SWSR_U1("colour_description_present_flag",
+			     &vui_params->colour_description_present_flag,
+			     sr_ctx);
+		if (vui_params->colour_description_present_flag) {
+			HEVC_SWSR_UN("colour_primaries",
+				     (unsigned int *)&vui_params->colour_primaries, 8, sr_ctx);
+			HEVC_SWSR_UN("transfer_characteristics",
+				     (unsigned int *)&vui_params->transfer_characteristics,
+				     8, sr_ctx);
+			HEVC_SWSR_UN("matrix_coeffs",
+				     (unsigned int *)&vui_params->matrix_coeffs, 8, sr_ctx);
+		}
+	}
+
+	HEVC_SWSR_U1("chroma_loc_info_present_flag",
+		     &vui_params->chroma_loc_info_present_flag, sr_ctx);
+	if (vui_params->chroma_loc_info_present_flag) {
+		HEVC_SWSR_UE("chroma_sample_loc_type_top_field",
+			     (unsigned int *)&vui_params->chroma_sample_loc_type_top_field,
+			     sr_ctx);
+		HEVC_RANGEUCHECK("chroma_sample_loc_type_top_field",
+				 vui_params->chroma_sample_loc_type_top_field,
+				 0, 5, &parse_err);
+		HEVC_SWSR_UE("chroma_sample_loc_type_bottom_field",
+			     (unsigned int *)&vui_params->chroma_sample_loc_type_bottom_field,
+			     sr_ctx);
+		HEVC_RANGEUCHECK("chroma_sample_loc_type_bottom_field",
+				 vui_params->chroma_sample_loc_type_bottom_field,
+				 0, 5, &parse_err);
+	}
+	HEVC_SWSR_U1("neutral_chroma_indication_flag",
+		     &vui_params->neutral_chroma_indication_flag, sr_ctx);
+	HEVC_SWSR_U1("field_seq_flag",
+		     &vui_params->field_seq_flag, sr_ctx);
+	HEVC_SWSR_U1("frame_field_info_present_flag",
+		     &vui_params->frame_field_info_present_flag, sr_ctx);
+	HEVC_SWSR_U1("default_display_window_flag",
+		     &vui_params->default_display_window_flag, sr_ctx);
+	if (vui_params->default_display_window_flag) {
+		HEVC_SWSR_UE("def_disp_win_left_offset",
+			     (unsigned int *)&vui_params->def_disp_win_left_offset, sr_ctx);
+		HEVC_SWSR_UE("def_disp_win_right_offset",
+			     (unsigned int *)&vui_params->def_disp_win_right_offset, sr_ctx);
+		HEVC_SWSR_UE("def_disp_win_top_offset",
+			     (unsigned int *)&vui_params->def_disp_win_top_offset, sr_ctx);
+		HEVC_SWSR_UE("def_disp_win_bottom_offset",
+			     (unsigned int *)&vui_params->def_disp_win_bottom_offset, sr_ctx);
+	}
+	HEVC_SWSR_U1("vui_timing_info_present_flag",
+		     &vui_params->vui_timing_info_present_flag, sr_ctx);
+	if (vui_params->vui_timing_info_present_flag) {
+		HEVC_SWSR_UN("vui_num_units_in_tick",
+			     (unsigned int *)&vui_params->vui_num_units_in_tick, 32, sr_ctx);
+		HEVC_SWSR_UN("vui_time_scale",
+			     (unsigned int *)&vui_params->vui_time_scale, 32, sr_ctx);
+		HEVC_SWSR_U1("vui_poc_proportional_to_timing_flag",
+			     &vui_params->vui_poc_proportional_to_timing_flag,
+			     sr_ctx);
+		if (vui_params->vui_poc_proportional_to_timing_flag)
+			HEVC_SWSR_UE("vui_num_ticks_poc_diff_one_minus1",
+				     (unsigned int *)&vui_params->vui_num_ticks_poc_diff_one_minus1,
+				     sr_ctx);
+
+		HEVC_SWSR_U1("vui_hrd_parameters_present_flag",
+			     &vui_params->vui_hrd_parameters_present_flag,
+			     sr_ctx);
+		if (vui_params->vui_hrd_parameters_present_flag)
+			bspp_hevc_parsehrdparams(sr_ctx, &vui_params->vui_hrd_params,
+						 1, sps_max_sub_layers_minus1);
+	}
+	HEVC_SWSR_U1("bitstream_restriction_flag",
+		     &vui_params->bitstream_restriction_flag, sr_ctx);
+
+	if (vui_params->bitstream_restriction_flag) {
+		HEVC_SWSR_U1("tiles_fixed_structure_flag",
+			     &vui_params->tiles_fixed_structure_flag, sr_ctx);
+		HEVC_SWSR_U1("motion_vectors_over_pic_boundaries_flag",
+			     &vui_params->motion_vectors_over_pic_boundaries_flag,
+			     sr_ctx);
+		HEVC_SWSR_U1("restricted_ref_pic_lists_flag",
+			     &vui_params->restricted_ref_pic_lists_flag, sr_ctx);
+
+		HEVC_SWSR_UE("min_spatial_segmentation_idc",
+			     (unsigned int *)&vui_params->min_spatial_segmentation_idc, sr_ctx);
+		HEVC_RANGEUCHECK("min_spatial_segmentation_idc",
+				 vui_params->min_spatial_segmentation_idc,
+				 0, 4095, &parse_err);
+
+		HEVC_SWSR_UE("max_bytes_per_pic_denom",
+			     (unsigned int *)&vui_params->max_bytes_per_pic_denom, sr_ctx);
+		HEVC_RANGEUCHECK("max_bytes_per_pic_denom", vui_params->max_bytes_per_pic_denom,
+				 0, 16, &parse_err);
+
+		HEVC_SWSR_UE("max_bits_per_min_cu_denom",
+			     (unsigned int *)&vui_params->max_bits_per_min_cu_denom, sr_ctx);
+		HEVC_RANGEUCHECK("max_bits_per_min_cu_denom", vui_params->max_bits_per_min_cu_denom,
+				 0, 16, &parse_err);
+
+		HEVC_SWSR_UE("log2_max_mv_length_horizontal",
+			     (unsigned int *)&vui_params->log2_max_mv_length_horizontal, sr_ctx);
+		HEVC_RANGEUCHECK("log2_max_mv_length_horizontal",
+				 vui_params->log2_max_mv_length_horizontal,
+				 0, 16, &parse_err);
+
+		HEVC_SWSR_UE("log2_max_mv_length_vertical",
+			     (unsigned int *)&vui_params->log2_max_mv_length_vertical, sr_ctx);
+		HEVC_RANGEUCHECK("log2_max_mv_length_vertical",
+				 vui_params->log2_max_mv_length_vertical,
+				 0, 15, &parse_err);
+	}
+
+	return parse_err;
+}
+
+static enum bspp_error_type bspp_hevc_parse_spsrange_extensions
+					(void *sr_ctx,
+					 struct bspp_hevc_sps_range_exts *range_exts)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(range_exts);
+
+	memset(range_exts, 0, sizeof(struct bspp_hevc_sps_range_exts));
+
+	HEVC_SWSR_U1("transform_skip_rotation_enabled_flag",
+		     &range_exts->transform_skip_rotation_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("transform_skip_context_enabled_flag",
+		     &range_exts->transform_skip_context_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("implicit_rdpcm_enabled_flag",
+		     &range_exts->implicit_rdpcm_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("explicit_rdpcm_enabled_flag",
+		     &range_exts->explicit_rdpcm_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("extended_precision_processing_flag",
+		     &range_exts->extended_precision_processing_flag, sr_ctx);
+	HEVC_UCHECK("extended_precision_processing_flag",
+		    range_exts->extended_precision_processing_flag,
+		    0, &parse_err);
+	HEVC_SWSR_U1("intra_smoothing_disabled_flag",
+		     &range_exts->intra_smoothing_disabled_flag, sr_ctx);
+	HEVC_SWSR_U1("high_precision_offsets_enabled_flag",
+		     &range_exts->high_precision_offsets_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("persistent_rice_adaptation_enabled_flag",
+		     &range_exts->persistent_rice_adaptation_enabled_flag,
+		     sr_ctx);
+	HEVC_SWSR_U1("cabac_bypass_alignment_enabled_flag",
+		     &range_exts->cabac_bypass_alignment_enabled_flag, sr_ctx);
+
+	return parse_err;
+}
+
+static unsigned char
+bspp_hevc_checksps_range_extensions(struct bspp_hevc_sps_range_exts *range_exts)
+{
+	VDEC_ASSERT(range_exts);
+
+	if (range_exts->transform_skip_rotation_enabled_flag ||
+	    range_exts->transform_skip_context_enabled_flag ||
+	    range_exts->implicit_rdpcm_enabled_flag ||
+	    range_exts->explicit_rdpcm_enabled_flag ||
+	    range_exts->extended_precision_processing_flag ||
+	    range_exts->intra_smoothing_disabled_flag ||
+	    range_exts->persistent_rice_adaptation_enabled_flag ||
+	    range_exts->cabac_bypass_alignment_enabled_flag)
+		return 1;
+	/*
+	 * Note: high_precision_offsets_enabled_flag is supported even
+	 * if the hw range-extensions capability (bHevcRangeExt) is not set.
+	 */
+	return 0;
+}
+
+static enum bspp_error_type bspp_hevc_parsesps(void *sr_ctx,
+					       void *str_res,
+					       struct bspp_hevc_sps *sps)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	unsigned char i;
+	unsigned int min_cblog2_size_y;
+
+	if (!sr_ctx || !sps) {
+		VDEC_ASSERT(0);
+		return BSPP_ERROR_INVALID_VALUE;
+	}
+
+	memset(sps, 0, sizeof(struct bspp_hevc_sps));
+
+	HEVC_SWSR_UN("sps_video_parameter_set_id",
+		     (unsigned int *)&sps->sps_video_parameter_set_id, 4, sr_ctx);
+	HEVC_SWSR_UN("sps_max_sub_layers_minus1",
+		     (unsigned int *)&sps->sps_max_sub_layers_minus1, 3, sr_ctx);
+	HEVC_RANGEUCHECK("sps_max_sub_layers_minus1", sps->sps_max_sub_layers_minus1, 0,
+			 HEVC_MAX_NUM_SUBLAYERS - 1, &parse_err);
+	HEVC_SWSR_U1("sps_temporal_id_nesting_flag",
+		     &sps->sps_temporal_id_nesting_flag, sr_ctx);
+
+	if (sps->sps_max_sub_layers_minus1 == 0)
+		HEVC_UCHECK("sps_temporal_id_nesting_flag",
+			    sps->sps_temporal_id_nesting_flag, 1, &parse_err);
+
+	parse_err |= bspp_hevc_parse_profiletierlevel
+				(sr_ctx, &sps->profile_tier_level,
+				 sps->sps_max_sub_layers_minus1);
+
+	HEVC_SWSR_UE("sps_seq_parameter_set_id",
+		     (unsigned int *)&sps->sps_seq_parameter_set_id, sr_ctx);
+	HEVC_RANGEUCHECK("sps_seq_parameter_set_id", sps->sps_seq_parameter_set_id, 0,
+			 HEVC_MAX_SPS_COUNT - 1, &parse_err);
+
+	HEVC_SWSR_UE("chroma_format_idc", (unsigned int *)&sps->chroma_format_idc, sr_ctx);
+	HEVC_RANGEUCHECK("chroma_format_idc", sps->chroma_format_idc, 0, 3, &parse_err);
+
+	if (sps->chroma_format_idc == 3)
+		HEVC_SWSR_U1("separate_colour_plane_flag",
+			     &sps->separate_colour_plane_flag, sr_ctx);
+
+	HEVC_SWSR_UE("pic_width_in_luma_samples",
+		     (unsigned int *)&sps->pic_width_in_luma_samples, sr_ctx);
+	HEVC_SWSR_UE("pic_height_in_luma_samples",
+		     (unsigned int *)&sps->pic_height_in_luma_samples, sr_ctx);
+
+	HEVC_SWSR_U1("conformance_window_flag", &sps->conformance_window_flag, sr_ctx);
+
+	if (sps->pic_width_in_luma_samples == 0 ||
+	    sps->pic_height_in_luma_samples == 0) {
+		pr_warn("Invalid video dimensions (%u, %u)\n",
+			sps->pic_width_in_luma_samples,
+			sps->pic_height_in_luma_samples);
+		parse_err |= BSPP_ERROR_UNRECOVERABLE;
+	}
+
+	if (sps->conformance_window_flag) {
+		HEVC_SWSR_UE("conf_win_left_offset",
+			     (unsigned int *)&sps->conf_win_left_offset, sr_ctx);
+		HEVC_SWSR_UE("conf_win_right_offset",
+			     (unsigned int *)&sps->conf_win_right_offset, sr_ctx);
+		HEVC_SWSR_UE("conf_win_top_offset",
+			     (unsigned int *)&sps->conf_win_top_offset, sr_ctx);
+		HEVC_SWSR_UE("conf_win_bottom_offset",
+			     (unsigned int *)&sps->conf_win_bottom_offset, sr_ctx);
+	}
+
+	HEVC_SWSR_UE("bit_depth_luma_minus8",
+		     (unsigned int *)&sps->bit_depth_luma_minus8, sr_ctx);
+	HEVC_RANGEUCHECK("bit_depth_luma_minus8",
+			 sps->bit_depth_luma_minus8, 0, 6, &parse_err);
+	HEVC_SWSR_UE("bit_depth_chroma_minus8",
+		     (unsigned int *)&sps->bit_depth_chroma_minus8, sr_ctx);
+	HEVC_RANGEUCHECK("bit_depth_chroma_minus8", sps->bit_depth_chroma_minus8,
+			 0, 6, &parse_err);
+
+	HEVC_SWSR_UE("log2_max_pic_order_cnt_lsb_minus4",
+		     (unsigned int *)&sps->log2_max_pic_order_cnt_lsb_minus4, sr_ctx);
+	HEVC_RANGEUCHECK("log2_max_pic_order_cnt_lsb_minus4",
+			 sps->log2_max_pic_order_cnt_lsb_minus4,
+			 0, 12, &parse_err);
+
+	HEVC_SWSR_U1("sps_sub_layer_ordering_info_present_flag",
+		     &sps->sps_sub_layer_ordering_info_present_flag, sr_ctx);
+	for (i = (sps->sps_sub_layer_ordering_info_present_flag ?
+		0 : sps->sps_max_sub_layers_minus1);
+		i <= sps->sps_max_sub_layers_minus1; ++i) {
+		HEVC_SWSR_UE("sps_max_dec_pic_buffering_minus1",
+			     (unsigned int *)&sps->sps_max_dec_pic_buffering_minus1[i], sr_ctx);
+		HEVC_SWSR_UE("sps_max_num_reorder_pics",
+			     (unsigned int *)&sps->sps_max_num_reorder_pics[i], sr_ctx);
+		HEVC_SWSR_UE("sps_max_latency_increase_plus1",
+			     (unsigned int *)&sps->sps_max_latency_increase_plus1[i], sr_ctx);
+	}
+
+	HEVC_SWSR_UE("log2_min_luma_coding_block_size_minus3",
+		     (unsigned int *)&sps->log2_min_luma_coding_block_size_minus3, sr_ctx);
+	HEVC_SWSR_UE("log2_diff_max_min_luma_coding_block_size",
+		     (unsigned int *)&sps->log2_diff_max_min_luma_coding_block_size, sr_ctx);
+	HEVC_SWSR_UE("log2_min_transform_block_size_minus2",
+		     (unsigned int *)&sps->log2_min_transform_block_size_minus2, sr_ctx);
+	HEVC_SWSR_UE("log2_diff_max_min_transform_block_size",
+		     (unsigned int *)&sps->log2_diff_max_min_transform_block_size, sr_ctx);
+	HEVC_SWSR_UE("max_transform_hierarchy_depth_inter",
+		     (unsigned int *)&sps->max_transform_hierarchy_depth_inter, sr_ctx);
+	HEVC_SWSR_UE("max_transform_hierarchy_depth_intra",
+		     (unsigned int *)&sps->max_transform_hierarchy_depth_intra, sr_ctx);
+
+	HEVC_SWSR_U1("scaling_list_enabled_flag", &sps->scaling_list_enabled_flag, sr_ctx);
+
+	if (sps->scaling_list_enabled_flag) {
+		HEVC_SWSR_U1("sps_scaling_list_data_present_flag",
+			     &sps->sps_scaling_list_data_present_flag, sr_ctx);
+		if (sps->sps_scaling_list_data_present_flag)
+			parse_err |= bspp_hevc_parse_scalinglistdata(sr_ctx,
+								     &sps->scalinglist_data);
+		else
+			bspp_hevc_usedefault_scalinglists(&sps->scalinglist_data);
+	}
+
+	HEVC_SWSR_U1("amp_enabled_flag", &sps->amp_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("sample_adaptive_offset_enabled_flag",
+		     &sps->sample_adaptive_offset_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("pcm_enabled_flag", &sps->pcm_enabled_flag, sr_ctx);
+
+	if (sps->pcm_enabled_flag) {
+		HEVC_SWSR_UN("pcm_sample_bit_depth_luma_minus1",
+			     (unsigned int *)&sps->pcm_sample_bit_depth_luma_minus1,
+			     4, sr_ctx);
+		HEVC_SWSR_UN("pcm_sample_bit_depth_chroma_minus1",
+			     (unsigned int *)&sps->pcm_sample_bit_depth_chroma_minus1,
+			     4, sr_ctx);
+		HEVC_SWSR_UE("log2_min_pcm_luma_coding_block_size_minus3",
+			     (unsigned int *)&sps->log2_min_pcm_luma_coding_block_size_minus3,
+			     sr_ctx);
+		HEVC_SWSR_UE("log2_diff_max_min_pcm_luma_coding_block_size",
+			     (unsigned int *)&sps->log2_diff_max_min_pcm_luma_coding_block_size,
+			     sr_ctx);
+		HEVC_SWSR_U1("pcm_loop_filter_disabled_flag",
+			     &sps->pcm_loop_filter_disabled_flag, sr_ctx);
+	} else {
+		sps->pcm_sample_bit_depth_luma_minus1 = 7;
+		sps->pcm_sample_bit_depth_chroma_minus1 = 7;
+		sps->log2_min_pcm_luma_coding_block_size_minus3 = 0;
+		sps->log2_diff_max_min_pcm_luma_coding_block_size = 2;
+	}
+
+	HEVC_SWSR_UE("num_short_term_ref_pic_sets",
+		     (unsigned int *)&sps->num_short_term_ref_pic_sets, sr_ctx);
+	HEVC_RANGEUCHECK("num_short_term_ref_pic_sets", sps->num_short_term_ref_pic_sets, 0,
+			 HEVC_MAX_NUM_ST_REF_PIC_SETS - 1, &parse_err);
+
+	for (i = 0; i < sps->num_short_term_ref_pic_sets; ++i) {
+		parse_err |= bspp_hevc_parse_shortterm_refpicset(sr_ctx,
+				sps->rps_list,
+				i,
+				0);
+	}
+
+	HEVC_SWSR_U1("long_term_ref_pics_present_flag",
+		     &sps->long_term_ref_pics_present_flag, sr_ctx);
+	if (sps->long_term_ref_pics_present_flag) {
+		HEVC_SWSR_UE("num_long_term_ref_pics_sps",
+			     (unsigned int *)&sps->num_long_term_ref_pics_sps, sr_ctx);
+		HEVC_RANGEUCHECK("num_long_term_ref_pics_sps",
+				 sps->num_long_term_ref_pics_sps, 0,
+				 HEVC_MAX_NUM_LT_REF_PICS, &parse_err);
+		for (i = 0; i < sps->num_long_term_ref_pics_sps; ++i) {
+			HEVC_SWSR_UN("lt_ref_pic_poc_lsb_sps",
+				     (unsigned int *)&sps->lt_ref_pic_poc_lsb_sps[i],
+				     sps->log2_max_pic_order_cnt_lsb_minus4 + 4,
+				     sr_ctx);
+			HEVC_SWSR_U1("used_by_curr_pic_lt_sps_flag",
+				     &sps->used_by_curr_pic_lt_sps_flag[i],
+				     sr_ctx);
+		}
+	}
+
+	HEVC_SWSR_U1("sps_temporal_mvp_enabled_flag", &sps->sps_temporal_mvp_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("strong_intra_smoothing_enabled_flag",
+		     &sps->strong_intra_smoothing_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("vui_parameters_present_flag", &sps->vui_parameters_present_flag, sr_ctx);
+
+	if (sps->vui_parameters_present_flag)
+		bspp_hevc_parsevui_parameters(sr_ctx, &sps->vui_params,
+					      sps->sps_max_sub_layers_minus1);
+
+	HEVC_SWSR_U1("sps_extension_present_flag", &sps->sps_extension_present_flag, sr_ctx);
+	if (sps->sps_extension_present_flag &&
+	    bspp_hevc_range_extensions_is_enabled(&sps->profile_tier_level)) {
+		HEVC_SWSR_U1("sps_range_extensions_flag", &sps->sps_range_extensions_flag, sr_ctx);
+
+		HEVC_SWSR_UN("sps_extension_7bits", (unsigned int *)&sps->sps_extension_7bits, 7,
+			     sr_ctx);
+		/*
+		 * Ignore extension data, but report via parse_err
+		 * if any non-zero data was found.
+		 */
+		HEVC_UCHECK("sps_extension_7bits", sps->sps_extension_7bits, 0, &parse_err);
+		/*
+		 * TODO?: the newest HEVC spec (10/2014) splits
+		 * "sps_extension_7bits" into sps_multilayer_extension_flag (1)
+		 * and sps_extension_6bits (6).
+		 */
+		if (sps->sps_range_extensions_flag)
+			parse_err |= bspp_hevc_parse_spsrange_extensions
+						(sr_ctx, &sps->range_exts);
+	}
+	/*
+	 * calculate "derived" variables needed further in the parsing process
+	 * (of other headers) and save them for later use
+	 */
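+	/*
+	 * chroma_format_idc mapping, as (sub_width_c, sub_height_c):
+	 * 0 = monochrome (1, 1), 1 = 4:2:0 (2, 2),
+	 * 2 = 4:2:2 (2, 1), 3 = 4:4:4 (1, 1).
+	 */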
+	sps->sub_width_c = 1;
+	sps->sub_height_c = 1;
+	if (sps->chroma_format_idc == 2) {
+		sps->sub_width_c = 2;
+	} else if (sps->chroma_format_idc == 1) {
+		sps->sub_width_c = 2;
+		sps->sub_height_c = 2;
+	}
+
+	min_cblog2_size_y = sps->log2_min_luma_coding_block_size_minus3 + 3;
+	sps->ctb_log2size_y =
+		min_cblog2_size_y + sps->log2_diff_max_min_luma_coding_block_size;
+	sps->ctb_size_y = 1 << sps->ctb_log2size_y;
+
+	if (sps->ctb_size_y > 0) {
+		/* use integer division with rounding up */
+		sps->pic_width_in_ctbs_y =
+			(sps->pic_width_in_luma_samples + sps->ctb_size_y - 1)
+			/ sps->ctb_size_y;
+		sps->pic_height_in_ctbs_y =
+			(sps->pic_height_in_luma_samples + sps->ctb_size_y - 1)
+			/ sps->ctb_size_y;
+	} else {
+		parse_err |= BSPP_ERROR_INVALID_VALUE;
+	}
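+	/*
+	 * E.g. a 1920x1080 stream with ctb_size_y = 64 gives
+	 * pic_width_in_ctbs_y = (1920 + 63) / 64 = 30 and
+	 * pic_height_in_ctbs_y = (1080 + 63) / 64 = 17.
+	 */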
+
+	sps->pic_size_in_ctbs_y =
+		sps->pic_width_in_ctbs_y * sps->pic_height_in_ctbs_y;
+
+	sps->max_pic_order_cnt_lsb =
+		1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
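+	/* log2_max_pic_order_cnt_lsb_minus4 is 0..12, so this is 16..65536. */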
+
+	for (i = 0; i <= sps->sps_max_sub_layers_minus1; ++i) {
+		sps->sps_max_latency_pictures[i] =
+			sps->sps_max_num_reorder_pics[i] +
+			sps->sps_max_latency_increase_plus1[i] - 1;
+	}
+
+	BSPP_HEVC_SYNTAX("ctb_size_y: %u", sps->ctb_size_y);
+	BSPP_HEVC_SYNTAX("pic_width_in_ctbs_y: %u", sps->pic_width_in_ctbs_y);
+	BSPP_HEVC_SYNTAX("pic_height_in_ctbs_y: %u", sps->pic_height_in_ctbs_y);
+	BSPP_HEVC_SYNTAX("pic_size_in_ctbs_y: %u", sps->pic_size_in_ctbs_y);
+
+	return parse_err;
+}
+
+static int bspp_hevc_release_sequhdrinfo(void *str_alloc, void *secure_spsinfo)
+{
+	struct bspp_hevc_sps *hevc_sps = (struct bspp_hevc_sps *)secure_spsinfo;
+
+	if (!hevc_sps)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	/* Release the raw VUI data. */
+	bspp_streamrelese_rawbstrdataplain(str_alloc, (void *)hevc_sps->vui_raw_data);
+	return 0;
+}
+
+static int bspp_hevc_releasedata(void *str_alloc, enum bspp_unit_type data_type,
+				 void *data_handle)
+{
+	int result = 0;
+
+	if (!data_handle)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	switch (data_type) {
+	case BSPP_UNIT_SEQUENCE:
+		result = bspp_hevc_release_sequhdrinfo(str_alloc, data_handle);
+		break;
+	default:
+		break;
+	}
+
+	return result;
+}
+
+static int bspp_hevc_reset_ppsinfo(void *secure_ppsinfo)
+{
+	struct bspp_hevc_pps *hevc_pps = NULL;
+
+	if (!secure_ppsinfo)
+		return IMG_ERROR_INVALID_PARAMETERS;
+
+	hevc_pps = (struct bspp_hevc_pps *)secure_ppsinfo;
+
+	memset(hevc_pps, 0, sizeof(*hevc_pps));
+
+	return 0;
+}
+
+static int bspp_hevc_resetdata(enum bspp_unit_type data_type, void *data_handle)
+{
+	int result = 0;
+
+	switch (data_type) {
+	case BSPP_UNIT_PPS:
+		result = bspp_hevc_reset_ppsinfo(data_handle);
+		break;
+	default:
+		break;
+	}
+	return result;
+}
+
+static enum bspp_error_type bspp_hevc_parsepps_range_extensions
+			(void *sr_ctx,
+			 struct bspp_hevc_pps_range_exts *range_exts,
+			 unsigned char transform_skip_enabled_flag,
+			 unsigned char log2_diff_max_min_luma_coding_block_size)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(range_exts);
+
+	memset(range_exts, 0, sizeof(struct bspp_hevc_pps_range_exts));
+
+	if (transform_skip_enabled_flag)
+		HEVC_SWSR_UE("log2_max_transform_skip_block_size_minus2",
+			     (unsigned int *)&range_exts->log2_max_transform_skip_block_size_minus2,
+			     sr_ctx);
+
+	HEVC_SWSR_U1("cross_component_prediction_enabled_flag",
+		     &range_exts->cross_component_prediction_enabled_flag,
+		     sr_ctx);
+	HEVC_UCHECK("cross_component_prediction_enabled_flag",
+		    range_exts->cross_component_prediction_enabled_flag, 0,
+		    &parse_err);
+
+	HEVC_SWSR_U1("chroma_qp_offset_list_enabled_flag",
+		     &range_exts->chroma_qp_offset_list_enabled_flag, sr_ctx);
+
+	if (range_exts->chroma_qp_offset_list_enabled_flag) {
+		unsigned char i;
+
+		HEVC_SWSR_UE("diff_cu_chroma_qp_offset_depth",
+			     (unsigned int *)&range_exts->diff_cu_chroma_qp_offset_depth,
+			     sr_ctx);
+		HEVC_RANGEUCHECK("diff_cu_chroma_qp_offset_depth",
+				 range_exts->diff_cu_chroma_qp_offset_depth, 0,
+				 log2_diff_max_min_luma_coding_block_size,
+				 &parse_err);
+
+		HEVC_SWSR_UE("chroma_qp_offset_list_len_minus1",
+			     (unsigned int *)&range_exts->chroma_qp_offset_list_len_minus1,
+			     sr_ctx);
+		HEVC_RANGEUCHECK("chroma_qp_offset_list_len_minus1",
+				 range_exts->chroma_qp_offset_list_len_minus1,
+				 0, HEVC_MAX_CHROMA_QP - 1, &parse_err);
+		for (i = 0; i <= range_exts->chroma_qp_offset_list_len_minus1; i++) {
+			HEVC_SWSR_SE("cb_qp_offset_list",
+				     (int *)&range_exts->cb_qp_offset_list[i], sr_ctx);
+			HEVC_RANGESCHECK("cb_qp_offset_list", range_exts->cb_qp_offset_list[i],
+					 -12, 12, &parse_err);
+			HEVC_SWSR_SE("cr_qp_offset_list",
+				     (int *)&range_exts->cr_qp_offset_list[i], sr_ctx);
+			HEVC_RANGESCHECK("cr_qp_offset_list", range_exts->cr_qp_offset_list[i],
+					 -12, 12, &parse_err);
+		}
+	}
+	HEVC_SWSR_UE("log2_sao_offset_scale_luma",
+		     (unsigned int *)&range_exts->log2_sao_offset_scale_luma, sr_ctx);
+	HEVC_UCHECK("log2_sao_offset_scale_luma",
+		    range_exts->log2_sao_offset_scale_luma, 0, &parse_err);
+	HEVC_SWSR_UE("log2_sao_offset_scale_chroma",
+		     (unsigned int *)&range_exts->log2_sao_offset_scale_chroma, sr_ctx);
+	HEVC_UCHECK("log2_sao_offset_scale_chroma",
+		    range_exts->log2_sao_offset_scale_chroma, 0, &parse_err);
+
+	return parse_err;
+}
+
+static unsigned char bspp_hevc_checkppsrangeextensions
+				(struct bspp_hevc_pps_range_exts *range_exts)
+{
+	VDEC_ASSERT(range_exts);
+
+	if (range_exts->log2_max_transform_skip_block_size_minus2 ||
+	    range_exts->cross_component_prediction_enabled_flag)
+		return 1;
+	/*
+	 * Note: chroma_qp_offset_list_enabled_flag is supported even
+	 * if the hw range-extensions capability (bHevcRangeExt) is not set.
+	 */
+	return 0;
+}
+
+static enum bspp_error_type bspp_hevc_parsepps
+			(void *sr_ctx, void *str_res,
+			 struct bspp_hevc_pps *pps)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	struct bspp_sequence_hdr_info *spsinfo = NULL;
+	struct bspp_hevc_sps *sps = NULL;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(pps);
+	memset(pps, 0, sizeof(struct bspp_hevc_pps));
+
+	HEVC_SWSR_UE("pps_pic_parameter_set_id",
+		     (unsigned int *)&pps->pps_pic_parameter_set_id, sr_ctx);
+	HEVC_RANGEUCHECK("pps_pic_parameter_set_id", pps->pps_pic_parameter_set_id, 0,
+			 HEVC_MAX_PPS_COUNT - 1, &parse_err);
+	HEVC_SWSR_UE("pps_seq_parameter_set_id",
+		     (unsigned int *)&pps->pps_seq_parameter_set_id, sr_ctx);
+	HEVC_RANGEUCHECK("pps_seq_parameter_set_id", pps->pps_seq_parameter_set_id, 0,
+			 HEVC_MAX_SPS_COUNT - 1, &parse_err);
+
+	spsinfo = bspp_get_sequ_hdr(str_res, pps->pps_seq_parameter_set_id);
+	if (!spsinfo) {
+		parse_err |= BSPP_ERROR_NO_SEQUENCE_HDR;
+	} else {
+		sps = (struct bspp_hevc_sps *)spsinfo->secure_sequence_info;
+		VDEC_ASSERT(sps->sps_seq_parameter_set_id ==
+			pps->pps_seq_parameter_set_id);
+	}
+
+	HEVC_SWSR_U1("dependent_slice_segments_enabled_flag",
+		     &pps->dependent_slice_segments_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("output_flag_present_flag",
+		     &pps->output_flag_present_flag, sr_ctx);
+	HEVC_SWSR_UN("num_extra_slice_header_bits",
+		     (unsigned int *)&pps->num_extra_slice_header_bits, 3, sr_ctx);
+	HEVC_SWSR_U1("sign_data_hiding_enabled_flag", &pps->sign_data_hiding_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("cabac_init_present_flag", &pps->cabac_init_present_flag, sr_ctx);
+	HEVC_SWSR_UE("num_ref_idx_l0_default_active_minus1",
+		     (unsigned int *)&pps->num_ref_idx_l0_default_active_minus1, sr_ctx);
+	HEVC_RANGEUCHECK("num_ref_idx_l0_default_active_minus1",
+			 pps->num_ref_idx_l0_default_active_minus1, 0, 14, &parse_err);
+	HEVC_SWSR_UE("num_ref_idx_l1_default_active_minus1",
+		     (unsigned int *)&pps->num_ref_idx_l1_default_active_minus1, sr_ctx);
+	HEVC_RANGEUCHECK("num_ref_idx_l1_default_active_minus1",
+			 pps->num_ref_idx_l1_default_active_minus1, 0, 14, &parse_err);
+	HEVC_SWSR_SE("init_qp_minus26", (int *)&pps->init_qp_minus26, sr_ctx);
+
+	if (sps)
+		HEVC_RANGESCHECK("init_qp_minus26", pps->init_qp_minus26,
+				 -(26 + (6 * sps->bit_depth_luma_minus8)), 25, &parse_err);
+
+	HEVC_SWSR_U1("constrained_intra_pred_flag", &pps->constrained_intra_pred_flag, sr_ctx);
+	HEVC_SWSR_U1("transform_skip_enabled_flag", &pps->transform_skip_enabled_flag, sr_ctx);
+
+	HEVC_SWSR_U1("cu_qp_delta_enabled_flag", &pps->cu_qp_delta_enabled_flag, sr_ctx);
+
+	if (pps->cu_qp_delta_enabled_flag)
+		HEVC_SWSR_UE("diff_cu_qp_delta_depth",
+			     (unsigned int *)&pps->diff_cu_qp_delta_depth, sr_ctx);
+
+	HEVC_SWSR_SE("pps_cb_qp_offset", (int *)&pps->pps_cb_qp_offset, sr_ctx);
+	HEVC_RANGESCHECK("pps_cb_qp_offset", pps->pps_cb_qp_offset, -12, 12, &parse_err);
+	HEVC_SWSR_SE("pps_cr_qp_offset", (int *)&pps->pps_cr_qp_offset, sr_ctx);
+	HEVC_RANGESCHECK("pps_cr_qp_offset", pps->pps_cr_qp_offset, -12, 12, &parse_err);
+	HEVC_SWSR_U1("pps_slice_chroma_qp_offsets_present_flag",
+		     &pps->pps_slice_chroma_qp_offsets_present_flag, sr_ctx);
+	HEVC_SWSR_U1("weighted_pred_flag", &pps->weighted_pred_flag, sr_ctx);
+	HEVC_SWSR_U1("weighted_bipred_flag", &pps->weighted_bipred_flag, sr_ctx);
+	HEVC_SWSR_U1("transquant_bypass_enabled_flag",
+		     &pps->transquant_bypass_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("tiles_enabled_flag", &pps->tiles_enabled_flag, sr_ctx);
+	HEVC_SWSR_U1("entropy_coding_sync_enabled_flag",
+		     &pps->entropy_coding_sync_enabled_flag, sr_ctx);
+
+	if (pps->tiles_enabled_flag) {
+		HEVC_SWSR_UE("num_tile_columns_minus1",
+			     (unsigned int *)&pps->num_tile_columns_minus1, sr_ctx);
+		HEVC_RANGEUCHECK("num_tile_columns_minus1", pps->num_tile_columns_minus1, 0,
+				 HEVC_MAX_TILE_COLS - 1, &parse_err);
+
+		/* Clamp so that later tile array indexing stays in bounds. */
+		if (pps->num_tile_columns_minus1 >= HEVC_MAX_TILE_COLS)
+			pps->num_tile_columns_minus1 = HEVC_MAX_TILE_COLS - 1;
+
+		HEVC_SWSR_UE("num_tile_rows_minus1", (unsigned int *)&pps->num_tile_rows_minus1,
+			     sr_ctx);
+		HEVC_RANGEUCHECK("num_tile_rows_minus1", pps->num_tile_rows_minus1, 0,
+				 HEVC_MAX_TILE_ROWS - 1, &parse_err);
+
+		if (pps->num_tile_rows_minus1 >= HEVC_MAX_TILE_ROWS)
+			pps->num_tile_rows_minus1 = HEVC_MAX_TILE_ROWS - 1;
+
+		HEVC_SWSR_U1("uniform_spacing_flag", &pps->uniform_spacing_flag, sr_ctx);
+
+		if (!pps->uniform_spacing_flag) {
+			unsigned char i = 0;
+
+			for (i = 0; i < pps->num_tile_columns_minus1; ++i)
+				HEVC_SWSR_UE("column_width_minus1",
+					     (unsigned int *)&pps->column_width_minus1[i],
+					     sr_ctx);
+
+			for (i = 0; i < pps->num_tile_rows_minus1; ++i)
+				HEVC_SWSR_UE("row_height_minus1",
+					     (unsigned int *)&pps->row_height_minus1[i],
+					     sr_ctx);
+		}
+		HEVC_SWSR_U1("loop_filter_across_tiles_enabled_flag",
+			     &pps->loop_filter_across_tiles_enabled_flag, sr_ctx);
+	} else {
+		pps->loop_filter_across_tiles_enabled_flag = 1;
+	}
+
+	HEVC_SWSR_U1("pps_loop_filter_across_slices_enabled_flag",
+		     &pps->pps_loop_filter_across_slices_enabled_flag, sr_ctx);
+
+	HEVC_SWSR_U1("deblocking_filter_control_present_flag",
+		     &pps->deblocking_filter_control_present_flag, sr_ctx);
+
+	if (pps->deblocking_filter_control_present_flag) {
+		HEVC_SWSR_U1("deblocking_filter_override_enabled_flag",
+			     &pps->deblocking_filter_override_enabled_flag, sr_ctx);
+		HEVC_SWSR_U1("pps_deblocking_filter_disabled_flag",
+			     &pps->pps_deblocking_filter_disabled_flag, sr_ctx);
+		if (!pps->pps_deblocking_filter_disabled_flag) {
+			HEVC_SWSR_SE("pps_beta_offset_div2", (int *)&pps->pps_beta_offset_div2,
+				     sr_ctx);
+			HEVC_RANGESCHECK("pps_beta_offset_div2", pps->pps_beta_offset_div2, -6, 6,
+					 &parse_err);
+			HEVC_SWSR_SE("pps_tc_offset_div2", (int *)&pps->pps_tc_offset_div2, sr_ctx);
+			HEVC_RANGESCHECK("pps_tc_offset_div2", pps->pps_tc_offset_div2, -6, 6,
+					 &parse_err);
+		}
+	}
+
+	HEVC_SWSR_U1("pps_scaling_list_data_present_flag",
+		     &pps->pps_scaling_list_data_present_flag, sr_ctx);
+	if (pps->pps_scaling_list_data_present_flag)
+		parse_err |= bspp_hevc_parse_scalinglistdata(sr_ctx, &pps->scaling_list);
+
+	HEVC_SWSR_U1("lists_modification_present_flag",
+		     &pps->lists_modification_present_flag, sr_ctx);
+	HEVC_SWSR_UE("log2_parallel_merge_level_minus2",
+		     (unsigned int *)&pps->log2_parallel_merge_level_minus2, sr_ctx);
+	HEVC_SWSR_U1("slice_segment_header_extension_present_flag",
+		     &pps->slice_segment_header_extension_present_flag, sr_ctx);
+
+	HEVC_SWSR_U1("pps_extension_present_flag", &pps->pps_extension_present_flag, sr_ctx);
+	if (pps->pps_extension_present_flag && sps &&
+	    bspp_hevc_range_extensions_is_enabled(&sps->profile_tier_level)) {
+		HEVC_SWSR_U1("pps_range_extensions_flag",
+			     &pps->pps_range_extensions_flag, sr_ctx);
+		HEVC_SWSR_UN("pps_extension_7bits",
+			     (unsigned int *)&pps->pps_extension_7bits, 7, sr_ctx);
+		/*
+		 * Ignore extension data, but report via parse_err
+		 * if any non-zero data was found.
+		 */
+		HEVC_UCHECK("pps_extension_7bits", pps->pps_extension_7bits, 0, &parse_err);
+
+		/*
+		 * TODO?: the newest HEVC spec (10/2014) splits
+		 * "pps_extension_7bits" into pps_multilayer_extension_flag (1)
+		 * and pps_extension_6bits (6).
+		 */
+		if (pps->pps_range_extensions_flag && sps) {
+			parse_err |= bspp_hevc_parsepps_range_extensions
+					(sr_ctx,
+					 &pps->range_exts,
+					 pps->transform_skip_enabled_flag,
+					 sps->log2_diff_max_min_luma_coding_block_size);
+		}
+	}
+
+	/* calculate derived elements */
+	if (pps->tiles_enabled_flag && sps)
+		bspp_hevc_dotilecalculations(sps, pps);
+
+	return parse_err;
+}
+
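+/*
+ * Derive per-tile column widths / row heights (in CTBs) and the tile
+ * boundary arrays. With uniform spacing the picture is split as evenly
+ * as possible, e.g. pic_width_in_ctbs_y = 32 with
+ * num_tile_columns_minus1 = 2 gives column widths 10, 11, 11 from
+ * colwidth[i] = ((i + 1) * 32) / 3 - (i * 32) / 3.
+ */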
+static void bspp_hevc_dotilecalculations(struct bspp_hevc_sps *sps,
+					 struct bspp_hevc_pps *pps)
+{
+	unsigned short colwidth[HEVC_MAX_TILE_COLS];
+	unsigned short rowheight[HEVC_MAX_TILE_ROWS];
+	unsigned char i;
+
+	if (!pps->tiles_enabled_flag) {
+		pps->max_tile_height_in_ctbs_y = sps->pic_height_in_ctbs_y;
+		return;
+	}
+
+	if (pps->uniform_spacing_flag) {
+		for (i = 0; i <= pps->num_tile_columns_minus1; ++i) {
+			colwidth[i] = ((i + 1) * sps->pic_width_in_ctbs_y) /
+				(pps->num_tile_columns_minus1 + 1) -
+				(i * sps->pic_width_in_ctbs_y) /
+				(pps->num_tile_columns_minus1 + 1);
+		}
+
+		for (i = 0; i <= pps->num_tile_rows_minus1; ++i) {
+			rowheight[i] = ((i + 1) * sps->pic_height_in_ctbs_y) /
+				(pps->num_tile_rows_minus1 + 1) -
+				(i * sps->pic_height_in_ctbs_y) /
+				(pps->num_tile_rows_minus1 + 1);
+		}
+
+		pps->max_tile_height_in_ctbs_y = rowheight[0];
+	} else {
+		pps->max_tile_height_in_ctbs_y = 0;
+
+		colwidth[pps->num_tile_columns_minus1] = sps->pic_width_in_ctbs_y;
+		for (i = 0; i <= pps->num_tile_columns_minus1; ++i) {
+			colwidth[i] = pps->column_width_minus1[i] + 1;
+			colwidth[pps->num_tile_columns_minus1] -= colwidth[i];
+		}
+
+		rowheight[pps->num_tile_rows_minus1] = sps->pic_height_in_ctbs_y;
+		for (i = 0; i <= pps->num_tile_rows_minus1; ++i) {
+			rowheight[i] = pps->row_height_minus1[i] + 1;
+			rowheight[pps->num_tile_rows_minus1] -= rowheight[i];
+
+			if (rowheight[i] > pps->max_tile_height_in_ctbs_y)
+				pps->max_tile_height_in_ctbs_y = rowheight[i];
+		}
+
+		if (rowheight[pps->num_tile_rows_minus1] > pps->max_tile_height_in_ctbs_y)
+			pps->max_tile_height_in_ctbs_y =
+					rowheight[pps->num_tile_rows_minus1];
+	}
+
+	for (i = 0; i <= pps->num_tile_columns_minus1; ++i)
+		pps->col_bd[i + 1] = pps->col_bd[i] + colwidth[i];
+
+	for (i = 0; i <= pps->num_tile_rows_minus1; ++i)
+		pps->row_bd[i + 1] = pps->row_bd[i] + rowheight[i];
+}
+
+static enum bspp_error_type bspp_hevc_parse_slicesegmentheader
+		(void *sr_ctx, void *str_res,
+		 struct bspp_hevc_slice_segment_header *ssh,
+		 unsigned char nalunit_type,
+		 struct bspp_vps_info **vpsinfo,
+		 struct bspp_sequence_hdr_info **spsinfo,
+		 struct bspp_pps_info **ppsinfo)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	struct bspp_hevc_pps *pps = NULL;
+	struct bspp_hevc_sps *sps = NULL;
+	struct bspp_hevc_vps *vps = NULL;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(ssh);
+	VDEC_ASSERT(vpsinfo);
+	VDEC_ASSERT(spsinfo);
+	VDEC_ASSERT(ppsinfo);
+
+	memset(ssh, 0, sizeof(struct bspp_hevc_slice_segment_header));
+
+	HEVC_SWSR_U1("first_slice_segment_in_pic_flag",
+		     &ssh->first_slice_segment_in_pic_flag, sr_ctx);
+
+	if (bspp_hevc_picture_is_irap((enum hevc_nalunittype)nalunit_type))
+		HEVC_SWSR_U1("no_output_of_prior_pics_flag",
+			     &ssh->no_output_of_prior_pics_flag, sr_ctx);
+
+	HEVC_SWSR_UE("slice_pic_parameter_set_id", (unsigned int *)&ssh->slice_pic_parameter_set_id,
+		     sr_ctx);
+	HEVC_RANGEUCHECK("slice_pic_parameter_set_id", ssh->slice_pic_parameter_set_id, 0,
+			 HEVC_MAX_PPS_COUNT - 1, &parse_err);
+
+	if (ssh->slice_pic_parameter_set_id >= HEVC_MAX_PPS_COUNT) {
+		pr_warn("PPS Id invalid (%u), setting to 0\n",
+			ssh->slice_pic_parameter_set_id);
+		ssh->slice_pic_parameter_set_id = 0;
+		parse_err &= ~BSPP_ERROR_INVALID_VALUE;
+		parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+	}
+
+	/* set PPS */
+	*ppsinfo = bspp_get_pps_hdr(str_res, ssh->slice_pic_parameter_set_id);
+	if (!(*ppsinfo)) {
+		parse_err |= BSPP_ERROR_NO_PPS;
+		goto error;
+	}
+	pps = (struct bspp_hevc_pps *)(*ppsinfo)->secure_pps_info;
+	if (!pps) {
+		parse_err |= BSPP_ERROR_NO_PPS;
+		goto error;
+	}
+	VDEC_ASSERT(pps->pps_pic_parameter_set_id == ssh->slice_pic_parameter_set_id);
+
+	*spsinfo = bspp_get_sequ_hdr(str_res, pps->pps_seq_parameter_set_id);
+	if (!(*spsinfo)) {
+		parse_err |= BSPP_ERROR_NO_SEQUENCE_HDR;
+		goto error;
+	}
+	sps = (struct bspp_hevc_sps *)(*spsinfo)->secure_sequence_info;
+	VDEC_ASSERT(sps->sps_seq_parameter_set_id == pps->pps_seq_parameter_set_id);
+
+	*vpsinfo = bspp_get_vpshdr(str_res, sps->sps_video_parameter_set_id);
+	if (!(*vpsinfo)) {
+		parse_err |= BSPP_ERROR_NO_VPS;
+		goto error;
+	}
+	vps = (struct bspp_hevc_vps *)(*vpsinfo)->secure_vpsinfo;
+	VDEC_ASSERT(vps->vps_video_parameter_set_id == sps->sps_video_parameter_set_id);
+
+	if (!ssh->first_slice_segment_in_pic_flag) {
+		if (pps->dependent_slice_segments_enabled_flag)
+			HEVC_SWSR_U1("dependent_slice_segment_flag",
+				     &ssh->dependent_slice_segment_flag, sr_ctx);
+
+		HEVC_SWSR_UN("slice_segment_address",
+			     (unsigned int *)&ssh->slice_segment_address,
+			     bspp_ceil_log2(sps->pic_size_in_ctbs_y), sr_ctx);
+	}
+
+error:
+	return parse_err;
+}
+
+static enum bspp_error_type bspp_hevc_parse_profiletierlevel
+		(void *sr_ctx,
+		 struct bspp_hevc_profile_tierlevel *ptl,
+		 unsigned char vps_maxsublayers_minus1)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	unsigned char i, j;
+	unsigned int res = 0;
+
+	VDEC_ASSERT(sr_ctx);
+	VDEC_ASSERT(ptl);
+	VDEC_ASSERT(vps_maxsublayers_minus1 < HEVC_MAX_NUM_SUBLAYERS);
+
+	memset(ptl, 0, sizeof(struct bspp_hevc_profile_tierlevel));
+
+	HEVC_SWSR_UN("general_profile_space", (unsigned int *)&ptl->general_profile_space, 2,
+		     sr_ctx);
+	HEVC_SWSR_U1("general_tier_flag", &ptl->general_tier_flag, sr_ctx);
+	HEVC_SWSR_UN("general_profile_idc", (unsigned int *)&ptl->general_profile_idc, 5, sr_ctx);
+
+	for (j = 0; j < HEVC_MAX_NUM_PROFILE_IDC; ++j) {
+		HEVC_SWSR_U1("general_profile_compatibility_flag",
+			     &ptl->general_profile_compatibility_flag[j],
+			     sr_ctx);
+	}
+
+	HEVC_SWSR_U1("general_progressive_source_flag",
+		     &ptl->general_progressive_source_flag, sr_ctx);
+	HEVC_SWSR_U1("general_interlaced_source_flag",
+		     &ptl->general_interlaced_source_flag, sr_ctx);
+	HEVC_SWSR_U1("general_non_packed_constraint_flag",
+		     &ptl->general_non_packed_constraint_flag, sr_ctx);
+	HEVC_SWSR_U1("general_frame_only_constraint_flag",
+		     &ptl->general_frame_only_constraint_flag, sr_ctx);
+
+	if (ptl->general_profile_idc == 4 ||
+	    ptl->general_profile_compatibility_flag[4]) {
+		HEVC_SWSR_U1("general_max_12bit_constraint_flag",
+			     &ptl->general_max_12bit_constraint_flag, sr_ctx);
+		HEVC_SWSR_U1("general_max_10bit_constraint_flag",
+			     &ptl->general_max_10bit_constraint_flag, sr_ctx);
+		HEVC_SWSR_U1("general_max_8bit_constraint_flag",
+			     &ptl->general_max_8bit_constraint_flag, sr_ctx);
+		HEVC_SWSR_U1("general_max_422chroma_constraint_flag",
+			     &ptl->general_max_422chroma_constraint_flag,
+			     sr_ctx);
+		HEVC_SWSR_U1("general_max_420chroma_constraint_flag",
+			     &ptl->general_max_420chroma_constraint_flag,
+			     sr_ctx);
+		HEVC_SWSR_U1("general_max_monochrome_constraint_flag",
+			     &ptl->general_max_monochrome_constraint_flag,
+			     sr_ctx);
+		HEVC_SWSR_U1("general_intra_constraint_flag",
+			     &ptl->general_intra_constraint_flag, sr_ctx);
+		HEVC_SWSR_U1("general_one_picture_only_constraint_flag",
+			     &ptl->general_one_picture_only_constraint_flag,
+			     sr_ctx);
+		HEVC_SWSR_U1("general_lower_bit_rate_constraint_flag",
+			     &ptl->general_lower_bit_rate_constraint_flag,
+			     sr_ctx);
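+		/*
+		 * The reserved fields are wider than the 32 bits the
+		 * shift-register reader appears to deliver per call, so
+		 * 35 bits are consumed as 32 + 3 (and 44 as 32 + 12 below).
+		 */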
+		HEVC_SWSR_UN("general_reserved_zero_35bits (1)", &res, 32, sr_ctx);
+		HEVC_UCHECK("general_reserved_zero_35bits (1)", res, 0, &parse_err);
+		HEVC_SWSR_UN("general_reserved_zero_35bits (2)", &res, 3, sr_ctx);
+		HEVC_UCHECK("general_reserved_zero_35bits (2)", res, 0, &parse_err);
+	} else {
+		HEVC_SWSR_UN("general_reserved_zero_44bits (1)", &res, 32, sr_ctx);
+		HEVC_UCHECK("general_reserved_zero_44bits (1)", res, 0, &parse_err);
+		HEVC_SWSR_UN("general_reserved_zero_44bits (2)", &res, 12, sr_ctx);
+		HEVC_UCHECK("general_reserved_zero_44bits (2)", res, 0, &parse_err);
+	}
+
+	HEVC_SWSR_UN("general_level_idc", (unsigned int *)&ptl->general_level_idc, 8, sr_ctx);
+	HEVC_RANGEUCHECK("general_level_idc", ptl->general_level_idc,
+			 HEVC_LEVEL_IDC_MIN, HEVC_LEVEL_IDC_MAX, &parse_err);
+
+	for (i = 0; i < vps_maxsublayers_minus1; ++i) {
+		HEVC_SWSR_U1("sub_layer_profile_present_flag",
+			     &ptl->sub_layer_profile_present_flag[i], sr_ctx);
+		HEVC_SWSR_U1("sub_layer_level_present_flag",
+			     &ptl->sub_layer_level_present_flag[i], sr_ctx);
+	}
+
+	if (vps_maxsublayers_minus1 > 0) {
+		for (i = vps_maxsublayers_minus1; i < 8; ++i) {
+			HEVC_SWSR_UN("reserved_zero_2bits", &res, 2, sr_ctx);
+			HEVC_UCHECK("reserved_zero_2bits", res, 0, &parse_err);
+		}
+	}
+
+	for (i = 0; i < vps_maxsublayers_minus1; ++i) {
+		if (ptl->sub_layer_profile_present_flag[i]) {
+			HEVC_SWSR_UN("sub_layer_profile_space",
+				     (unsigned int *)&ptl->sub_layer_profile_space[i], 2, sr_ctx);
+			HEVC_SWSR_U1("sub_layer_tier_flag", &ptl->sub_layer_tier_flag[i], sr_ctx);
+			HEVC_SWSR_UN("sub_layer_profile_idc",
+				     (unsigned int *)&ptl->sub_layer_profile_idc[i], 5, sr_ctx);
+			for (j = 0; j < HEVC_MAX_NUM_PROFILE_IDC; ++j)
+				HEVC_SWSR_U1("sub_layer_profile_compatibility_flag",
+					     &ptl->sub_layer_profile_compatibility_flag[i][j],
+					     sr_ctx);
+
+			HEVC_SWSR_U1("sub_layer_progressive_source_flag",
+				     &ptl->sub_layer_progressive_source_flag[i],
+				     sr_ctx);
+			HEVC_SWSR_U1("sub_layer_interlaced_source_flag",
+				     &ptl->sub_layer_interlaced_source_flag[i],
+				     sr_ctx);
+			HEVC_SWSR_U1("sub_layer_non_packed_constraint_flag",
+				     &ptl->sub_layer_non_packed_constraint_flag[i],
+				     sr_ctx);
+			HEVC_SWSR_U1("sub_layer_frame_only_constraint_flag",
+				     &ptl->sub_layer_frame_only_constraint_flag[i],
+				     sr_ctx);
+
+			if (ptl->sub_layer_profile_idc[i] == 4 ||
+			    ptl->sub_layer_profile_compatibility_flag[i][4]) {
+				HEVC_SWSR_U1("sub_layer_max_12bit_constraint_flag",
+					     &ptl->sub_layer_max_12bit_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_max_10bit_constraint_flag",
+					     &ptl->sub_layer_max_10bit_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_max_8bit_constraint_flag",
+					     &ptl->sub_layer_max_8bit_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_max_422chroma_constraint_flag",
+					     &ptl->sub_layer_max_422chroma_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_max_420chroma_constraint_flag",
+					     &ptl->sub_layer_max_420chroma_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_max_monochrome_constraint_flag",
+					     &ptl->sub_layer_max_monochrome_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_intra_constraint_flag",
+					     &ptl->sub_layer_intra_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_one_picture_only_constraint_flag",
+					     &ptl->sub_layer_one_picture_only_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_U1("sub_layer_lower_bit_rate_constraint_flag",
+					     &ptl->sub_layer_lower_bit_rate_constraint_flag[i],
+					     sr_ctx);
+				HEVC_SWSR_UN("sub_layer_reserved_zero_35bits (1)",
+					     &res, 32, sr_ctx);
+				HEVC_UCHECK("sub_layer_reserved_zero_35bits (1)",
+					    res, 0, &parse_err);
+				HEVC_SWSR_UN("sub_layer_reserved_zero_35bits (2)",
+					     &res, 3, sr_ctx);
+				HEVC_UCHECK("sub_layer_reserved_zero_35bits (2)",
+					    res, 0, &parse_err);
+			} else {
+				HEVC_SWSR_UN("sub_layer_reserved_zero_44bits (1)",
+					     &res, 32, sr_ctx);
+				HEVC_UCHECK("sub_layer_reserved_zero_44bits (1)",
+					    res, 0, &parse_err);
+				HEVC_SWSR_UN("sub_layer_reserved_zero_44bits (2)",
+					     &res, 12, sr_ctx);
+				HEVC_UCHECK("sub_layer_reserved_zero_44bits (2)",
+					    res, 0, &parse_err);
+			}
+		}
+		if (ptl->sub_layer_level_present_flag[i])
+			HEVC_SWSR_UN("sub_layer_level_idc",
+				     (unsigned int *)&ptl->sub_layer_level_idc[i], 8, sr_ctx);
+	}
+	return parse_err;
+}
+
+/* Default scaling lists */
+#define HEVC_SCALING_LIST_0_SIZE 16
+#define HEVC_SCALING_LIST_123_SIZE 64
+
+static const unsigned char def_4x4[HEVC_SCALING_LIST_0_SIZE] = {
+	16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+static const unsigned char def_8x8_intra[HEVC_SCALING_LIST_123_SIZE] = {
+	16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 16, 17, 16, 17, 18,
+	17, 18, 18, 17, 18, 21, 19, 20, 21, 20, 19, 21, 24, 22, 22, 24,
+	24, 22, 22, 24, 25, 25, 27, 30, 27, 25, 25, 29, 31, 35, 35, 31,
+	29, 36, 41, 44, 41, 36, 47, 54, 54, 47, 65, 70, 65, 88, 88, 115
+};
+
+static const unsigned char def_8x8_inter[HEVC_SCALING_LIST_123_SIZE] = {
+	16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18,
+	18, 18, 18, 18, 18, 20, 20, 20, 20, 20, 20, 20, 24, 24, 24, 24,
+	24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 28, 28, 28, 28, 28,
+	28, 33, 33, 33, 33, 33, 41, 41, 41, 41, 54, 54, 54, 71, 71, 91
+};
+
+/*
+ * Scan order mapping when translating scaling lists from bitstream order
+ * to PVDEC order
+ */
+static const unsigned char HEVC_INV_ZZ_SCAN4[HEVC_SCALING_LIST_MATRIX_SIZE / 4]  = {
+	0,  1,  2,  4,  3,  6,  7, 10,  5,  8,  9, 12, 11, 13, 14, 15
+};
+
+static const unsigned char HEVC_INV_ZZ_SCAN8[HEVC_SCALING_LIST_MATRIX_SIZE] = {
+	0,  1,  2,  4,  3,  6,  7, 11,  5,  8,  9, 13, 12, 17, 18, 24,
+	10, 15, 16, 22, 21, 28, 29, 36, 23, 30, 31, 38, 37, 43, 44, 49,
+	14, 19, 20, 26, 25, 32, 33, 40, 27, 34, 35, 42, 41, 47, 48, 53,
+	39, 45, 46, 51, 50, 54, 55, 58, 52, 56, 57, 60, 59, 61, 62, 63
+};
+
+static void bspp_hevc_getdefault_scalinglist
+		(unsigned char size_id, unsigned char matrix_id,
+		 const unsigned char **default_scalinglist,
+		 unsigned int *size)
+{
+	static const unsigned char *defaultlists
+	[HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES] = {
+		{ def_4x4, def_4x4, def_4x4, def_4x4, def_4x4, def_4x4 },
+		{ def_8x8_intra, def_8x8_intra, def_8x8_intra,
+		  def_8x8_inter, def_8x8_inter, def_8x8_inter },
+		{ def_8x8_intra, def_8x8_intra, def_8x8_intra,
+		  def_8x8_inter, def_8x8_inter, def_8x8_inter },
+		{ def_8x8_intra, def_8x8_inter, NULL, NULL, NULL, NULL }
+	};
+
+	static const unsigned int lists_sizes
+	[HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES] = {
+		{ sizeof(def_4x4), sizeof(def_4x4), sizeof(def_4x4),
+		  sizeof(def_4x4), sizeof(def_4x4), sizeof(def_4x4) },
+		{ sizeof(def_8x8_intra), sizeof(def_8x8_intra),
+		  sizeof(def_8x8_intra), sizeof(def_8x8_inter),
+		  sizeof(def_8x8_inter), sizeof(def_8x8_inter) },
+		{ sizeof(def_8x8_intra), sizeof(def_8x8_intra),
+		  sizeof(def_8x8_intra), sizeof(def_8x8_inter),
+		  sizeof(def_8x8_inter), sizeof(def_8x8_inter) },
+		{ sizeof(def_8x8_intra), sizeof(def_8x8_inter), 0, 0, 0, 0 }
+	};
+
+	/* to assert that input to this function was correct */
+	VDEC_ASSERT(size_id < 4);
+	VDEC_ASSERT(size_id < 3 ? (matrix_id < 6) : (matrix_id < 2));
+
+	*default_scalinglist = defaultlists[size_id][matrix_id];
+	*size = lists_sizes[size_id][matrix_id];
+}
+
+static enum bspp_error_type bspp_hevc_parse_scalinglistdata
+			(void *sr_ctx,
+			 struct bspp_hevc_scalinglist_data *scaling_listdata)
+{
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+	unsigned char size_id, matrix_id;
+
+	for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+		for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+			++matrix_id) {
+			/*
+			 * Select scaling list on which we will operate in
+			 * the iteration
+			 */
+			unsigned char *scalinglist = scaling_listdata->lists[size_id][matrix_id];
+
+			unsigned char scaling_list_pred_mode_flag = 0;
+
+			HEVC_SWSR_U1("scaling_list_pred_mode_flag",
+				     &scaling_list_pred_mode_flag, sr_ctx);
+			if (!scaling_list_pred_mode_flag) {
+				unsigned char scaling_list_pred_matrix_id_delta = 0;
+				const unsigned char *defaultlist = NULL;
+				unsigned int listsize = 0;
+
+				HEVC_SWSR_UE("scaling_list_pred_matrixid_delta",
+					     (unsigned int *)&scaling_list_pred_matrix_id_delta,
+					     sr_ctx);
+
+				bspp_hevc_getdefault_scalinglist(size_id,
+								 matrix_id,
+								 &defaultlist,
+								 &listsize);
+
+				if (scaling_list_pred_matrix_id_delta == 0) {
+					/* use default one */
+					memcpy(scalinglist, defaultlist, listsize);
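+					/* the default DC coefficient is 8 + 8 = 16 */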
+					if (size_id > 1)
+						scaling_listdata->dccoeffs[size_id - 2][matrix_id] =
+							8 + 8;
+				} else {
+					unsigned char ref_matrix_id =
+						matrix_id - scaling_list_pred_matrix_id_delta;
+					unsigned char *refscalinglist =
+						scaling_listdata->lists[size_id][ref_matrix_id];
+					/*
+					 *  use reference list given by
+					 * scaling_list_pred_matrix_id_delta
+					 */
+					memcpy(scalinglist, refscalinglist, listsize);
+					if (size_id > 1)
+						scaling_listdata->dccoeffs[size_id - 2][matrix_id] =
+							scaling_listdata->dccoeffs
+								[size_id - 2][ref_matrix_id];
+				}
+			} else {
+				/*
+				 * scaling list coefficients
+				 * signalled explicitly
+				 */
+				static const short coef_startvalue = 8;
+				static const unsigned char matrix_max_coef_num = 64;
+
+				short next_coef = coef_startvalue;
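+				/*
+				 * 16x16 and 32x32 matrices are signalled with
+				 * at most 64 coefficients (plus a DC term)
+				 */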
+				unsigned char coef_num =
+					HEVC_MIN(matrix_max_coef_num,
+						 (1 << (4 + (size_id << 1))), unsigned char);
+
+				unsigned char i;
+
+				if (size_id > 1) {
+					int scaling_list_dc_coef_minus8 = 0;
+
+					HEVC_SWSR_SE("scaling_list_dc_coef_minus8",
+						     &scaling_list_dc_coef_minus8,
+						     sr_ctx);
+					HEVC_RANGESCHECK("scaling_list_dc_coef_minus8",
+							 scaling_list_dc_coef_minus8,
+							 -7, 247, &parse_err);
+
+					next_coef = scaling_list_dc_coef_minus8 + 8;
+					scaling_listdata->dccoeffs[size_id - 2][matrix_id] =
+						(unsigned char)next_coef;
+				}
+				for (i = 0; i < coef_num; ++i) {
+					int scaling_list_delta_coef = 0;
+
+					HEVC_SWSR_SE("scaling_list_delta_coef",
+						     &scaling_list_delta_coef, sr_ctx);
+					HEVC_RANGESCHECK("scaling_list_delta_coef",
+							 scaling_list_delta_coef, -128, 127,
+							 &parse_err);
+
+					next_coef = (next_coef +
+						     scaling_list_delta_coef + 256) & 0xFF;
+					scalinglist[i] = next_coef;
+				}
+			}
+		}
+	}
+
+#ifdef DEBUG_DECODER_DRIVER
+	/* print calculated scaling lists */
+	for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+		for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+			++matrix_id) {
+			unsigned char i = 0;
+			/*
+			 * Select the scaling list to operate on
+			 * in this iteration
+			 */
+			unsigned char *scalinglist = scaling_listdata->lists[size_id][matrix_id];
+
+			for (; i < ((size_id == 0) ? 16 : 64); ++i) {
+				BSPP_HEVC_SYNTAX("scalinglist[%u][%u][%u] = %u",
+						 size_id,
+						 matrix_id,
+						 i,
+						 scalinglist[i]);
+			}
+		}
+	}
+#endif
+
+	return parse_err;
+}
+
+static void
+bspp_hevc_usedefault_scalinglists(struct bspp_hevc_scalinglist_data *scaling_listdata)
+{
+	unsigned char size_id, matrix_id;
+
+	for (size_id = 0; size_id < HEVC_SCALING_LIST_NUM_SIZES; ++size_id) {
+		for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+			++matrix_id) {
+			unsigned char *list = scaling_listdata->lists[size_id][matrix_id];
+			const unsigned char *defaultlist = NULL;
+			unsigned int listsize = 0;
+
+			bspp_hevc_getdefault_scalinglist(size_id, matrix_id, &defaultlist,
+							 &listsize);
+
+			memcpy(list, defaultlist, listsize);
+		}
+	}
+
+	memset(scaling_listdata->dccoeffs, 8 + 8, sizeof(scaling_listdata->dccoeffs));
+}
+
+static enum bspp_error_type bspp_hevc_parse_shortterm_refpicset
+				(void *sr_ctx,
+				 struct bspp_hevc_shortterm_refpicset *st_refpicset,
+				 unsigned char st_rps_idx,
+				 unsigned char in_slice_header)
+{
+	/*
+	 * Note: unfortunately the short term ref pic set has to be
+	 * "partially decoded" and parsed at the same time, because its
+	 * derived syntax elements are used to predict subsequent
+	 * short term ref pic sets.
+	 */
+	enum bspp_error_type parse_err = BSPP_ERROR_NONE;
+
+	struct bspp_hevc_shortterm_refpicset *strps =
+		&st_refpicset[st_rps_idx];
+	unsigned char inter_ref_pic_set_prediction_flag = 0;
+	unsigned int i = 0;
+
+	memset(strps, 0, sizeof(*strps));
+
+	if (st_rps_idx != 0) {
+		HEVC_SWSR_U1("inter_ref_pic_set_prediction_flag",
+			     &inter_ref_pic_set_prediction_flag, sr_ctx);
+	}
+
+	if (inter_ref_pic_set_prediction_flag) {
+		signed char j = 0;
+		unsigned char j_8 = 0;
+		unsigned char ref_rps_idx = 0;
+		int delta_rps = 0;
+		unsigned char i = 0;
+		unsigned int delta_idx_minus1 = 0;
+		unsigned char delta_rps_sign = 0;
+		unsigned int abs_delta_rps_minus1 = 0;
+		unsigned char used_by_curr_pic_flag[HEVC_MAX_NUM_REF_PICS];
+		unsigned char use_delta_flag[HEVC_MAX_NUM_REF_PICS];
+
+		struct bspp_hevc_shortterm_refpicset *ref_strps = NULL;
+
+		if (in_slice_header) {
+			HEVC_SWSR_UE("delta_idx_minus1", (unsigned int *)&delta_idx_minus1, sr_ctx);
+			HEVC_RANGEUCHECK("delta_idx_minus1", delta_idx_minus1, 0, st_rps_idx - 1,
+					 &parse_err);
+		}
+
+		HEVC_SWSR_U1("delta_rps_sign", &delta_rps_sign, sr_ctx);
+		HEVC_SWSR_UE("abs_delta_rps_minus1", (unsigned int *)&abs_delta_rps_minus1, sr_ctx);
+		HEVC_RANGEUCHECK("abs_delta_rps_minus1", abs_delta_rps_minus1, 0, ((1 << 15) - 1),
+				 &parse_err);
+
+		ref_rps_idx = st_rps_idx - (delta_idx_minus1 + 1);
+		ref_strps = &st_refpicset[ref_rps_idx];
+
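+		/* use_delta_flag is inferred to be 1 when not signalled */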
+		memset(use_delta_flag, 1, sizeof(use_delta_flag));
+
+		for (j_8 = 0; j_8 <= ref_strps->num_delta_pocs; ++j_8) {
+			HEVC_SWSR_U1("used_by_curr_pic_flag", &used_by_curr_pic_flag[j_8], sr_ctx);
+			if (!used_by_curr_pic_flag[j_8])
+				HEVC_SWSR_U1("use_delta_flag", &use_delta_flag[j_8], sr_ctx);
+		}
+
+		delta_rps =
+			(1 - 2 * delta_rps_sign) * ((int)abs_delta_rps_minus1 + 1);
+
+		/*
+		 * predict delta POC values of current strps from
+		 * reference strps
+		 */
+		for (j = ref_strps->num_positive_pics - 1; j >= 0; --j) {
+			int dpoc = ref_strps->delta_poc_s1[j] + delta_rps;
+
+			if (dpoc < 0 && use_delta_flag[ref_strps->num_negative_pics + j]) {
+				strps->delta_poc_s0[i] = dpoc;
+				strps->used_bycurr_pic_s0[i++] =
+					used_by_curr_pic_flag[ref_strps->num_negative_pics + j];
+			}
+		}
+
+		if (delta_rps < 0 && use_delta_flag[ref_strps->num_delta_pocs]) {
+			strps->delta_poc_s0[i] = delta_rps;
+			strps->used_bycurr_pic_s0[i++] =
+				used_by_curr_pic_flag[ref_strps->num_delta_pocs];
+		}
+
+		for (j_8 = 0; j_8 < ref_strps->num_negative_pics; ++j_8) {
+			int dpoc = ref_strps->delta_poc_s0[j_8] + delta_rps;
+
+			if (dpoc < 0 && use_delta_flag[j_8]) {
+				strps->delta_poc_s0[i] = dpoc;
+				strps->used_bycurr_pic_s0[i++] = used_by_curr_pic_flag[j_8];
+			}
+		}
+
+		strps->num_negative_pics = i;
+
+		i = 0;
+		for (j = ref_strps->num_negative_pics - 1; j >= 0; --j) {
+			int dpoc = ref_strps->delta_poc_s0[j] + delta_rps;
+
+			if (dpoc > 0 && use_delta_flag[j]) {
+				strps->delta_poc_s1[i] = dpoc;
+				strps->used_bycurr_pic_s1[i++] =
+					used_by_curr_pic_flag[j];
+			}
+		}
+
+		if (delta_rps > 0 && use_delta_flag[ref_strps->num_delta_pocs]) {
+			strps->delta_poc_s1[i] = delta_rps;
+			strps->used_bycurr_pic_s1[i++] =
+				used_by_curr_pic_flag[ref_strps->num_delta_pocs];
+		}
+
+		for (j_8 = 0; j_8 < ref_strps->num_positive_pics; ++j_8) {
+			int dpoc = ref_strps->delta_poc_s1[j_8] + delta_rps;
+
+			if (dpoc > 0 && use_delta_flag[ref_strps->num_negative_pics + j_8]) {
+				strps->delta_poc_s1[i] = dpoc;
+				strps->used_bycurr_pic_s1[i++] =
+					used_by_curr_pic_flag[ref_strps->num_negative_pics + j_8];
+			}
+		}
+
+		strps->num_positive_pics = i;
+		strps->num_delta_pocs = strps->num_negative_pics + strps->num_positive_pics;
+		if (strps->num_delta_pocs > (HEVC_MAX_NUM_REF_PICS - 1)) {
+			strps->num_delta_pocs = HEVC_MAX_NUM_REF_PICS - 1;
+			parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+		}
+	} else {
+		unsigned int num_negative_pics = 0;
+		unsigned int num_positive_pics = 0;
+		unsigned int delta_poc_s0_minus1[HEVC_MAX_NUM_REF_PICS];
+		unsigned char used_by_curr_pic_s0_flag[HEVC_MAX_NUM_REF_PICS];
+		unsigned int delta_poc_s1_minus1[HEVC_MAX_NUM_REF_PICS];
+		unsigned char used_by_curr_pic_s1_flag[HEVC_MAX_NUM_REF_PICS];
+		unsigned char j = 0;
+
+		HEVC_SWSR_UE("num_negative_pics", (unsigned int *)&num_negative_pics, sr_ctx);
+		if (num_negative_pics > HEVC_MAX_NUM_REF_PICS) {
+			num_negative_pics = HEVC_MAX_NUM_REF_PICS;
+			parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+		}
+		HEVC_SWSR_UE("num_positive_pics", (unsigned int *)&num_positive_pics, sr_ctx);
+		if (num_positive_pics > HEVC_MAX_NUM_REF_PICS) {
+			num_positive_pics = HEVC_MAX_NUM_REF_PICS;
+			parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+		}
+
+		for (j = 0; j < num_negative_pics; ++j) {
+			HEVC_SWSR_UE("delta_poc_s0_minus1",
+				     (unsigned int *)&delta_poc_s0_minus1[j], sr_ctx);
+			HEVC_RANGEUCHECK("delta_poc_s0_minus1", delta_poc_s0_minus1[j], 0,
+					 ((1 << 15) - 1), &parse_err);
+			HEVC_SWSR_U1("used_by_curr_pic_s0_flag",
+				     &used_by_curr_pic_s0_flag[j], sr_ctx);
+
+			if (j == 0)
+				strps->delta_poc_s0[j] =
+					-((int)delta_poc_s0_minus1[j] + 1);
+			else
+				strps->delta_poc_s0[j] = strps->delta_poc_s0[j - 1] -
+							((int)delta_poc_s0_minus1[j] + 1);
+
+			strps->used_bycurr_pic_s0[j] = used_by_curr_pic_s0_flag[j];
+		}
+
+		for (j = 0; j < num_positive_pics; j++) {
+			HEVC_SWSR_UE("delta_poc_s1_minus1",
+				     (unsigned int *)&delta_poc_s1_minus1[j], sr_ctx);
+			HEVC_RANGEUCHECK("delta_poc_s1_minus1", delta_poc_s1_minus1[j], 0,
+					 ((1 << 15) - 1), &parse_err);
+			HEVC_SWSR_U1("used_by_curr_pic_s1_flag",
+				     &used_by_curr_pic_s1_flag[j], sr_ctx);
+
+			if (j == 0)
+				strps->delta_poc_s1[j] =
+					(delta_poc_s1_minus1[j] + 1);
+			else
+				strps->delta_poc_s1[j] = strps->delta_poc_s1[j - 1] +
+							(delta_poc_s1_minus1[j] + 1);
+			strps->used_bycurr_pic_s1[j] = used_by_curr_pic_s1_flag[j];
+		}
+
+		strps->num_negative_pics = num_negative_pics;
+		strps->num_positive_pics = num_positive_pics;
+		strps->num_delta_pocs = strps->num_negative_pics + strps->num_positive_pics;
+		if (strps->num_delta_pocs > (HEVC_MAX_NUM_REF_PICS - 1)) {
+			strps->num_delta_pocs = HEVC_MAX_NUM_REF_PICS - 1;
+			parse_err |= BSPP_ERROR_CORRECTION_VALIDVALUE;
+		}
+	}
+
+	BSPP_HEVC_SYNTAX
+		("strps[%u]: num_delta_pocs: %u (%u (num_negative_pics) + %u (num_positive_pics))",
+		 st_rps_idx, strps->num_delta_pocs, strps->num_negative_pics,
+		 strps->num_positive_pics);
+
+	for (i = 0; i < strps->num_negative_pics; ++i) {
+		BSPP_HEVC_SYNTAX("StRps[%u][%u]: delta_poc_s0: %d, used_bycurr_pic_s0: %u",
+				 st_rps_idx, i, strps->delta_poc_s0[i],
+				 strps->used_bycurr_pic_s0[i]);
+	}
+
+	for (i = 0; i < strps->num_positive_pics; ++i) {
+		BSPP_HEVC_SYNTAX("StRps[%u][%u]: delta_poc_s1: %d, used_bycurr_pic_s1: %u",
+				 st_rps_idx, i, strps->delta_poc_s1[i],
+				 strps->used_bycurr_pic_s1[i]);
+	}
+
+	return parse_err;
+}
+
+static void bspp_hevc_fillcommonseqhdr(struct bspp_hevc_sps *sps,
+				       struct vdec_comsequ_hdrinfo *common_seq)
+{
+	struct bspp_hevc_vui_params *vui = &sps->vui_params;
+	unsigned char chroma_idc = sps->chroma_format_idc;
+	struct pixel_pixinfo *pixel_info = &common_seq->pixel_info;
+	unsigned int maxsub_layersmin1;
+	unsigned int maxdpb_size;
+	struct vdec_rect *rawdisp_region;
+
+	common_seq->codec_profile = sps->profile_tier_level.general_profile_idc;
+	common_seq->codec_level   = sps->profile_tier_level.general_level_idc;
+
+	if (sps->vui_parameters_present_flag &&
+	    vui->vui_timing_info_present_flag) {
+		common_seq->frame_rate_num = vui->vui_time_scale;
+		common_seq->frame_rate_den = vui->vui_num_units_in_tick;
+		common_seq->frame_rate =
+			common_seq->frame_rate_num / common_seq->frame_rate_den;
+	}
+
+	if (vui->aspect_ratio_info_present_flag) {
+		common_seq->aspect_ratio_num = vui->sar_width;
+		common_seq->aspect_ratio_den = vui->sar_height;
+	}
+
+	common_seq->interlaced_frames = 0;
+
+	/* handle pixel format definitions */
+	pixel_info->chroma_fmt = chroma_idc == 0 ? 0 : 1;
+	pixel_info->chroma_fmt_idc = pixelformat_idc[chroma_idc];
+	pixel_info->chroma_interleave =
+		chroma_idc == 0 ? PIXEL_INVALID_CI : PIXEL_UV_ORDER;
+	pixel_info->bitdepth_y = sps->bit_depth_luma_minus8 + 8;
+	pixel_info->bitdepth_c = sps->bit_depth_chroma_minus8 + 8;
+
+	pixel_info->mem_pkg = (pixel_info->bitdepth_y > 8 ||
+		(pixel_info->bitdepth_c > 8 && pixel_info->chroma_fmt)) ?
+		PIXEL_BIT10_MSB_MP : PIXEL_BIT8_MP;
+	pixel_info->num_planes =
+		chroma_idc == 0 ? 1 : (sps->separate_colour_plane_flag ? 3 : 2);
+
+	pixel_info->pixfmt = pixel_get_pixfmt(pixel_info->chroma_fmt_idc,
+					      pixel_info->chroma_interleave,
+					      pixel_info->mem_pkg,
+					      pixel_info->bitdepth_y,
+					      pixel_info->chroma_fmt ?
+					      pixel_info->bitdepth_c : PIXEL_INVALID_BDC,
+					      pixel_info->num_planes);
+
+	common_seq->max_frame_size.width = sps->pic_width_in_ctbs_y * sps->ctb_size_y;
+	common_seq->max_frame_size.height = sps->pic_height_in_ctbs_y * sps->ctb_size_y;
+
+	common_seq->frame_size.width = sps->pic_width_in_luma_samples;
+	common_seq->frame_size.height = sps->pic_height_in_luma_samples;
+
+	/* Get HEVC max num ref pictures and pass to bspp info */
+	vdecddutils_ref_pic_hevc_get_maxnum(common_seq, &common_seq->max_ref_frame_num);
+
+	common_seq->field_codec_mblocks = 0;
+
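+	/* derive the minimum DPB size from the highest sub-layer's limits */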
+	maxsub_layersmin1 = sps->sps_max_sub_layers_minus1;
+	maxdpb_size =
+		HEVC_MAX(sps->sps_max_dec_pic_buffering_minus1[maxsub_layersmin1] + 1,
+			 sps->sps_max_num_reorder_pics[maxsub_layersmin1], unsigned char);
+
+	if (sps->sps_max_latency_increase_plus1[maxsub_layersmin1]) {
+		maxdpb_size =
+			HEVC_MAX(maxdpb_size,
+				 sps->sps_max_latency_pictures[maxsub_layersmin1], unsigned int);
+	}
+
+	maxdpb_size = HEVC_MIN(maxdpb_size,
+			       HEVC_MAX_NUM_REF_IDX_ACTIVE + 1, unsigned int);
+
+	common_seq->min_pict_buf_num = HEVC_MAX(maxdpb_size, 6, unsigned int);
+
+	common_seq->picture_reordering = 1;
+	common_seq->post_processing = 0;
+
+	/* handle display region calculation */
+	rawdisp_region = &common_seq->raw_display_region;
+
+	rawdisp_region->width = sps->pic_width_in_luma_samples;
+	rawdisp_region->height = sps->pic_height_in_luma_samples;
+	rawdisp_region->top_offset = 0;
+	rawdisp_region->left_offset = 0;
+
+	if (sps->conformance_window_flag) {
+		struct vdec_rect *disp_region =
+			&common_seq->orig_display_region;
+
+		disp_region->top_offset =
+			sps->sub_height_c * sps->conf_win_top_offset;
+		disp_region->left_offset =
+			sps->sub_width_c * sps->conf_win_left_offset;
+		disp_region->width =
+			sps->pic_width_in_luma_samples -
+			disp_region->left_offset -
+			sps->sub_width_c * sps->conf_win_right_offset;
+		disp_region->height =
+			sps->pic_height_in_luma_samples -
+			disp_region->top_offset -
+			sps->sub_height_c * sps->conf_win_bottom_offset;
+	} else {
+		common_seq->orig_display_region =
+			common_seq->raw_display_region;
+	}
+}
+
+static void bspp_hevc_fillpicturehdr(struct vdec_comsequ_hdrinfo *common_seq,
+				     enum hevc_nalunittype nalunit_type,
+				     struct bspp_pict_hdr_info *picture_hdr,
+				     struct bspp_hevc_sps *sps,
+				     struct bspp_hevc_pps *pps,
+				     struct bspp_hevc_vps *vps)
+{
+	picture_hdr->intra_coded = (nalunit_type == HEVC_NALTYPE_IDR_W_RADL ||
+		nalunit_type == HEVC_NALTYPE_IDR_N_LP);
+	picture_hdr->field = 0;
+	picture_hdr->post_processing = 0;
+	picture_hdr->discontinuous_mbs = 0;
+	picture_hdr->pict_aux_data.id = BSPP_INVALID;
+	picture_hdr->second_pict_aux_data.id = BSPP_INVALID;
+	picture_hdr->pict_sgm_data.id = BSPP_INVALID;
+	picture_hdr->coded_frame_size.width =
+		HEVC_ALIGN(sps->pic_width_in_luma_samples, HEVC_MIN_CODED_UNIT_SIZE, unsigned int);
+	picture_hdr->coded_frame_size.height =
+		HEVC_ALIGN(sps->pic_height_in_luma_samples, HEVC_MIN_CODED_UNIT_SIZE, unsigned int);
+	picture_hdr->disp_info.enc_disp_region = common_seq->orig_display_region;
+	picture_hdr->disp_info.disp_region = common_seq->orig_display_region;
+	picture_hdr->disp_info.raw_disp_region = common_seq->raw_display_region;
+	picture_hdr->disp_info.num_pan_scan_windows = 0;
+	picture_hdr->hevc_pict_hdr_info.range_ext_present =
+			(sps->profile_tier_level.general_profile_idc == 4) ||
+			sps->profile_tier_level.general_profile_compatibility_flag[4];
+
+	picture_hdr->hevc_pict_hdr_info.is_full_range_ext = 0;
+	if (picture_hdr->hevc_pict_hdr_info.range_ext_present &&
+	    (bspp_hevc_checkppsrangeextensions(&pps->range_exts) ||
+	    bspp_hevc_checksps_range_extensions(&sps->range_exts)))
+		picture_hdr->hevc_pict_hdr_info.is_full_range_ext = 1;
+
+	memset(picture_hdr->disp_info.pan_scan_windows, 0,
+	       sizeof(picture_hdr->disp_info.pan_scan_windows));
+}
+
+static void bspp_hevc_fill_fwsps(struct bspp_hevc_sps *sps, struct hevcfw_sequence_ps *fwsps)
+{
+	unsigned char i;
+
+	fwsps->pic_width_in_luma_samples = sps->pic_width_in_luma_samples;
+	fwsps->pic_height_in_luma_samples = sps->pic_height_in_luma_samples;
+	fwsps->num_short_term_ref_pic_sets = sps->num_short_term_ref_pic_sets;
+	fwsps->num_long_term_ref_pics_sps = sps->num_long_term_ref_pics_sps;
+	fwsps->sps_max_sub_layers_minus1 = sps->sps_max_sub_layers_minus1;
+	fwsps->max_transform_hierarchy_depth_inter =
+				sps->max_transform_hierarchy_depth_inter;
+	fwsps->max_transform_hierarchy_depth_intra =
+				sps->max_transform_hierarchy_depth_intra;
+	fwsps->log2_diff_max_min_transform_block_size =
+				sps->log2_diff_max_min_transform_block_size;
+	fwsps->log2_min_transform_block_size_minus2 =
+				sps->log2_min_transform_block_size_minus2;
+	fwsps->log2_diff_max_min_luma_coding_block_size =
+				sps->log2_diff_max_min_luma_coding_block_size;
+	fwsps->log2_min_luma_coding_block_size_minus3 =
+				sps->log2_min_luma_coding_block_size_minus3;
+
+	HEVC_STATIC_ASSERT(sizeof(sps->sps_max_dec_pic_buffering_minus1) ==
+			   sizeof(fwsps->sps_max_dec_pic_buffering_minus1));
+	memcpy(fwsps->sps_max_dec_pic_buffering_minus1, sps->sps_max_dec_pic_buffering_minus1,
+	       sizeof(fwsps->sps_max_dec_pic_buffering_minus1[0]) *
+	       (sps->sps_max_sub_layers_minus1 + 1));
+
+	HEVC_STATIC_ASSERT(sizeof(sps->sps_max_num_reorder_pics) ==
+			   sizeof(fwsps->sps_max_num_reorder_pics));
+	memcpy(fwsps->sps_max_num_reorder_pics, sps->sps_max_num_reorder_pics,
+	       sizeof(fwsps->sps_max_num_reorder_pics[0]) *
+	       (sps->sps_max_sub_layers_minus1 + 1));
+
+	HEVC_STATIC_ASSERT(sizeof(sps->sps_max_latency_increase_plus1) ==
+			   sizeof(fwsps->sps_max_latency_increase_plus1));
+	memcpy(fwsps->sps_max_latency_increase_plus1, sps->sps_max_latency_increase_plus1,
+	       sizeof(fwsps->sps_max_latency_increase_plus1[0]) *
+	       (sps->sps_max_sub_layers_minus1 + 1));
+
+	fwsps->chroma_format_idc = sps->chroma_format_idc;
+	fwsps->separate_colour_plane_flag = sps->separate_colour_plane_flag;
+	fwsps->log2_max_pic_order_cnt_lsb_minus4 =
+		sps->log2_max_pic_order_cnt_lsb_minus4;
+	fwsps->long_term_ref_pics_present_flag =
+		sps->long_term_ref_pics_present_flag;
+	fwsps->sample_adaptive_offset_enabled_flag =
+		sps->sample_adaptive_offset_enabled_flag;
+	fwsps->sps_temporal_mvp_enabled_flag =
+		sps->sps_temporal_mvp_enabled_flag;
+	fwsps->bit_depth_luma_minus8 = sps->bit_depth_luma_minus8;
+	fwsps->bit_depth_chroma_minus8 = sps->bit_depth_chroma_minus8;
+	fwsps->pcm_sample_bit_depth_luma_minus1 =
+		sps->pcm_sample_bit_depth_luma_minus1;
+	fwsps->pcm_sample_bit_depth_chroma_minus1 =
+		sps->pcm_sample_bit_depth_chroma_minus1;
+	fwsps->log2_min_pcm_luma_coding_block_size_minus3 =
+		sps->log2_min_pcm_luma_coding_block_size_minus3;
+	fwsps->log2_diff_max_min_pcm_luma_coding_block_size =
+		sps->log2_diff_max_min_pcm_luma_coding_block_size;
+	fwsps->pcm_loop_filter_disabled_flag =
+		sps->pcm_loop_filter_disabled_flag;
+	fwsps->amp_enabled_flag = sps->amp_enabled_flag;
+	fwsps->pcm_enabled_flag = sps->pcm_enabled_flag;
+	fwsps->strong_intra_smoothing_enabled_flag =
+		sps->strong_intra_smoothing_enabled_flag;
+	fwsps->scaling_list_enabled_flag = sps->scaling_list_enabled_flag;
+	fwsps->transform_skip_rotation_enabled_flag =
+		sps->range_exts.transform_skip_rotation_enabled_flag;
+	fwsps->transform_skip_context_enabled_flag =
+		sps->range_exts.transform_skip_context_enabled_flag;
+	fwsps->implicit_rdpcm_enabled_flag =
+		sps->range_exts.implicit_rdpcm_enabled_flag;
+	fwsps->explicit_rdpcm_enabled_flag =
+		sps->range_exts.explicit_rdpcm_enabled_flag;
+	fwsps->extended_precision_processing_flag =
+		sps->range_exts.extended_precision_processing_flag;
+	fwsps->intra_smoothing_disabled_flag =
+		sps->range_exts.intra_smoothing_disabled_flag;
+	/*
+	 * High precision offsets make no sense for 8-bit luma and chroma,
+	 * so forward this parameter only when bitdepth > 8.
+	 */
+	if (sps->bit_depth_luma_minus8 || sps->bit_depth_chroma_minus8)
+		fwsps->high_precision_offsets_enabled_flag =
+			sps->range_exts.high_precision_offsets_enabled_flag;
+
+	fwsps->persistent_rice_adaptation_enabled_flag =
+		sps->range_exts.persistent_rice_adaptation_enabled_flag;
+	fwsps->cabac_bypass_alignment_enabled_flag =
+		sps->range_exts.cabac_bypass_alignment_enabled_flag;
+
+	HEVC_STATIC_ASSERT(sizeof(sps->lt_ref_pic_poc_lsb_sps) ==
+			   sizeof(fwsps->lt_ref_pic_poc_lsb_sps));
+	HEVC_STATIC_ASSERT(sizeof(sps->used_by_curr_pic_lt_sps_flag) ==
+			   sizeof(fwsps->used_by_curr_pic_lt_sps_flag));
+	memcpy(fwsps->lt_ref_pic_poc_lsb_sps, sps->lt_ref_pic_poc_lsb_sps,
+	       sizeof(fwsps->lt_ref_pic_poc_lsb_sps[0]) *
+		sps->num_long_term_ref_pics_sps);
+	memcpy(fwsps->used_by_curr_pic_lt_sps_flag, sps->used_by_curr_pic_lt_sps_flag,
+	       sizeof(fwsps->used_by_curr_pic_lt_sps_flag[0]) * sps->num_long_term_ref_pics_sps);
+
+	for (i = 0; i < sps->num_short_term_ref_pic_sets; ++i)
+		bspp_hevc_fill_fwst_rps(&sps->rps_list[i], &fwsps->st_rps_list[i]);
+
+	/* derived elements */
+	fwsps->pic_size_in_ctbs_y = sps->pic_size_in_ctbs_y;
+	fwsps->pic_height_in_ctbs_y = sps->pic_height_in_ctbs_y;
+	fwsps->pic_width_in_ctbs_y = sps->pic_width_in_ctbs_y;
+	fwsps->ctb_size_y = sps->ctb_size_y;
+	fwsps->ctb_log2size_y = sps->ctb_log2size_y;
+	fwsps->max_pic_order_cnt_lsb = sps->max_pic_order_cnt_lsb;
+
+	HEVC_STATIC_ASSERT(sizeof(sps->sps_max_latency_pictures) ==
+			   sizeof(fwsps->sps_max_latency_pictures));
+	memcpy(fwsps->sps_max_latency_pictures, sps->sps_max_latency_pictures,
+	       sizeof(fwsps->sps_max_latency_pictures[0]) *
+	      (sps->sps_max_sub_layers_minus1 + 1));
+}
+
+static void bspp_hevc_fill_fwst_rps(struct bspp_hevc_shortterm_refpicset *strps,
+				    struct hevcfw_short_term_ref_picset *fwstrps)
+{
+	fwstrps->num_delta_pocs = strps->num_delta_pocs;
+	fwstrps->num_negative_pics = strps->num_negative_pics;
+	fwstrps->num_positive_pics = strps->num_positive_pics;
+
+	HEVC_STATIC_ASSERT(sizeof(strps->delta_poc_s0) ==
+			   sizeof(fwstrps->delta_poc_s0));
+	memcpy(fwstrps->delta_poc_s0, strps->delta_poc_s0,
+	       sizeof(fwstrps->delta_poc_s0[0]) * strps->num_negative_pics);
+
+	HEVC_STATIC_ASSERT(sizeof(strps->delta_poc_s1) ==
+			   sizeof(fwstrps->delta_poc_s1));
+	memcpy(fwstrps->delta_poc_s1, strps->delta_poc_s1,
+	       sizeof(fwstrps->delta_poc_s1[0]) * strps->num_positive_pics);
+
+	HEVC_STATIC_ASSERT(sizeof(strps->used_bycurr_pic_s0) ==
+			   sizeof(fwstrps->used_bycurr_pic_s0));
+	memcpy(fwstrps->used_bycurr_pic_s0, strps->used_bycurr_pic_s0,
+	       sizeof(fwstrps->used_bycurr_pic_s0[0]) * strps->num_negative_pics);
+
+	HEVC_STATIC_ASSERT(sizeof(strps->used_bycurr_pic_s1) ==
+			   sizeof(fwstrps->used_bycurr_pic_s1));
+	memcpy(fwstrps->used_bycurr_pic_s1, strps->used_bycurr_pic_s1,
+	       sizeof(fwstrps->used_bycurr_pic_s1[0]) * strps->num_positive_pics);
+}
+
+static void bspp_hevc_fill_fwpps(struct bspp_hevc_pps *pps, struct hevcfw_picture_ps *fw_pps)
+{
+	fw_pps->pps_pic_parameter_set_id = pps->pps_pic_parameter_set_id;
+	fw_pps->num_tile_columns_minus1 = pps->num_tile_columns_minus1;
+	fw_pps->num_tile_rows_minus1 = pps->num_tile_rows_minus1;
+	fw_pps->diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth;
+	fw_pps->init_qp_minus26 = pps->init_qp_minus26;
+	fw_pps->pps_beta_offset_div2 = pps->pps_beta_offset_div2;
+	fw_pps->pps_tc_offset_div2 = pps->pps_tc_offset_div2;
+	fw_pps->pps_cb_qp_offset = pps->pps_cb_qp_offset;
+	fw_pps->pps_cr_qp_offset = pps->pps_cr_qp_offset;
+	fw_pps->log2_parallel_merge_level_minus2 =
+		pps->log2_parallel_merge_level_minus2;
+
+	fw_pps->dependent_slice_segments_enabled_flag =
+		pps->dependent_slice_segments_enabled_flag;
+	fw_pps->output_flag_present_flag = pps->output_flag_present_flag;
+	fw_pps->num_extra_slice_header_bits = pps->num_extra_slice_header_bits;
+	fw_pps->lists_modification_present_flag =
+		pps->lists_modification_present_flag;
+	fw_pps->cabac_init_present_flag = pps->cabac_init_present_flag;
+	fw_pps->weighted_pred_flag = pps->weighted_pred_flag;
+	fw_pps->weighted_bipred_flag = pps->weighted_bipred_flag;
+	fw_pps->pps_slice_chroma_qp_offsets_present_flag =
+		pps->pps_slice_chroma_qp_offsets_present_flag;
+	fw_pps->deblocking_filter_override_enabled_flag =
+		pps->deblocking_filter_override_enabled_flag;
+	fw_pps->tiles_enabled_flag = pps->tiles_enabled_flag;
+	fw_pps->entropy_coding_sync_enabled_flag =
+		pps->entropy_coding_sync_enabled_flag;
+	fw_pps->slice_segment_header_extension_present_flag =
+		pps->slice_segment_header_extension_present_flag;
+	fw_pps->transquant_bypass_enabled_flag =
+		pps->transquant_bypass_enabled_flag;
+	fw_pps->cu_qp_delta_enabled_flag = pps->cu_qp_delta_enabled_flag;
+	fw_pps->transform_skip_enabled_flag = pps->transform_skip_enabled_flag;
+	fw_pps->sign_data_hiding_enabled_flag =
+		pps->sign_data_hiding_enabled_flag;
+	fw_pps->num_ref_idx_l0_default_active_minus1 =
+		pps->num_ref_idx_l0_default_active_minus1;
+	fw_pps->num_ref_idx_l1_default_active_minus1 =
+		pps->num_ref_idx_l1_default_active_minus1;
+	fw_pps->constrained_intra_pred_flag =  pps->constrained_intra_pred_flag;
+	fw_pps->pps_deblocking_filter_disabled_flag =
+		pps->pps_deblocking_filter_disabled_flag;
+	fw_pps->pps_loop_filter_across_slices_enabled_flag =
+		pps->pps_loop_filter_across_slices_enabled_flag;
+	fw_pps->loop_filter_across_tiles_enabled_flag =
+		pps->loop_filter_across_tiles_enabled_flag;
+	fw_pps->log2_max_transform_skip_block_size_minus2 =
+		pps->range_exts.log2_max_transform_skip_block_size_minus2;
+	fw_pps->cross_component_prediction_enabled_flag =
+		pps->range_exts.cross_component_prediction_enabled_flag;
+	fw_pps->chroma_qp_offset_list_enabled_flag =
+		pps->range_exts.chroma_qp_offset_list_enabled_flag;
+	fw_pps->diff_cu_chroma_qp_offset_depth =
+		pps->range_exts.diff_cu_chroma_qp_offset_depth;
+	fw_pps->chroma_qp_offset_list_len_minus1 =
+		pps->range_exts.chroma_qp_offset_list_len_minus1;
+	memcpy(fw_pps->cb_qp_offset_list, pps->range_exts.cb_qp_offset_list,
+	       sizeof(pps->range_exts.cb_qp_offset_list));
+	memcpy(fw_pps->cr_qp_offset_list, pps->range_exts.cr_qp_offset_list,
+	       sizeof(pps->range_exts.cr_qp_offset_list));
+
+	/* derived elements */
+	HEVC_STATIC_ASSERT(sizeof(pps->col_bd) == sizeof(fw_pps->col_bd));
+	HEVC_STATIC_ASSERT(sizeof(pps->row_bd) == sizeof(fw_pps->row_bd));
+	memcpy(fw_pps->col_bd, pps->col_bd, sizeof(fw_pps->col_bd));
+	memcpy(fw_pps->row_bd, pps->row_bd, sizeof(fw_pps->row_bd));
+}
+
+static void bspp_hevc_fill_fw_scaling_lists(struct bspp_hevc_pps *pps,
+					    struct bspp_hevc_sps *sps,
+					    struct hevcfw_picture_ps *fw_pps)
+{
+	signed char size_id, matrix_id;
+	unsigned char *scalinglist;
+	/*
+	 * Start at index 1 to leave space for the addresses,
+	 * which are filled in by the lower layer
+	 */
+	unsigned int *scaling_lists = &fw_pps->scaling_lists[1];
+	unsigned char i;
+
+	struct bspp_hevc_scalinglist_data *scaling_listdata =
+		pps->pps_scaling_list_data_present_flag ?
+		&pps->scaling_list :
+		&sps->scalinglist_data;
+
+	if (!sps->scaling_list_enabled_flag)
+		return;
+
+	fw_pps->scaling_list_enabled_flag = sps->scaling_list_enabled_flag;
+
+	for (size_id = HEVC_SCALING_LIST_NUM_SIZES - 1;
+		size_id >= 0; --size_id) {
+		const unsigned char *zz =
+			(size_id == 0 ? HEVC_INV_ZZ_SCAN4 : HEVC_INV_ZZ_SCAN8);
+
+		for (matrix_id = 0; matrix_id < ((size_id == 3) ? 2 : 6);
+			++matrix_id) {
+			/*
+			 * Select the scaling list to operate on
+			 * in this iteration
+			 */
+			scalinglist =
+				scaling_listdata->lists[size_id][matrix_id];
+
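+			/*
+			 * Pack four de-zigzagged 8-bit coefficients into each
+			 * 32-bit word; every other word stays free for the
+			 * addresses filled in by the lower layer.
+			 */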
+			for (i = 0; i < ((size_id == 0) ? 16 : 64); i += 4) {
+				*scaling_lists =
+					scalinglist[zz[i + 3]] << 24 |
+					scalinglist[zz[i + 2]] << 16 |
+					scalinglist[zz[i + 1]] << 8 |
+					scalinglist[zz[i]];
+				scaling_lists += 2;
+			}
+		}
+	}
+
+	for (i = 0; i < 2; ++i) {
+		*scaling_lists = scaling_listdata->dccoeffs[1][i];
+		scaling_lists += 2;
+	}
+
+	for (i = 0; i < 6; ++i) {
+		*scaling_lists = scaling_listdata->dccoeffs[0][i];
+		scaling_lists += 2;
+	}
+}
+
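+/* compute ceil(log2(linear_val)); returns 0 for inputs 0 and 1 */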
+static unsigned int bspp_ceil_log2(unsigned int linear_val)
+{
+	unsigned int log_val = 0;
+
+	if (linear_val > 0)
+		--linear_val;
+
+	while (linear_val > 0) {
+		linear_val >>= 1;
+		++log_val;
+	}
+
+	return log_val;
+}
+
+static unsigned char bspp_hevc_picture_is_irap(enum hevc_nalunittype nalunit_type)
+{
+	return (nalunit_type >= HEVC_NALTYPE_BLA_W_LP) &&
+	       (nalunit_type <= HEVC_NALTYPE_RSV_IRAP_VCL23);
+}
+
+static unsigned char bspp_hevc_picture_is_cra(enum hevc_nalunittype nalunit_type)
+{
+	return (nalunit_type == HEVC_NALTYPE_CRA);
+}
+
+static unsigned char bspp_hevc_picture_is_idr(enum hevc_nalunittype nalunit_type)
+{
+	return (nalunit_type == HEVC_NALTYPE_IDR_N_LP) ||
+	       (nalunit_type == HEVC_NALTYPE_IDR_W_RADL);
+}
+
+static unsigned char bspp_hevc_picture_is_bla(enum hevc_nalunittype nalunit_type)
+{
+	return (nalunit_type >= HEVC_NALTYPE_BLA_W_LP) &&
+	       (nalunit_type <= HEVC_NALTYPE_BLA_N_LP);
+}
+
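+/*
+ * NoRaslOutputFlag is set for IDR and BLA pictures, for the first picture
+ * after an end-of-sequence NAL and for a CRA picture that starts a sequence.
+ */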
+static unsigned char bspp_hevc_picture_getnorasl_outputflag
+					(enum hevc_nalunittype nalunit_type,
+					 struct bspp_hevc_inter_pict_ctx *inter_pict_ctx)
+{
+	VDEC_ASSERT(inter_pict_ctx);
+
+	if (bspp_hevc_picture_is_idr(nalunit_type) ||
+	    bspp_hevc_picture_is_bla(nalunit_type) ||
+	    inter_pict_ctx->first_after_eos ||
+	    (bspp_hevc_picture_is_cra(nalunit_type) && inter_pict_ctx->seq_pic_count == 1))
+		return 1;
+
+	return 0;
+}
+
+static unsigned char bspp_hevc_range_extensions_is_enabled
+				(struct bspp_hevc_profile_tierlevel *profile_tierlevel)
+{
+	unsigned char is_enabled;
+
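+	/* general_profile_idc 4 is the format range extensions (RExt) profile */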
+	is_enabled = profile_tierlevel->general_profile_idc >= 4 ||
+		profile_tierlevel->general_profile_compatibility_flag[4];
+
+	return is_enabled;
+}
+
+static void bspp_hevc_parse_codec_config(void *hndl_swsr_ctx, unsigned int *unit_count,
+					 unsigned int *unit_array_count,
+					 unsigned int *delim_length,
+					 unsigned int *size_delim_length)
+{
+	unsigned long long value = 23;
+
+	/*
+	 * Set the shift-register up to provide next 23 bytes
+	 * without emulation prevention detection.
+	 */
+	swsr_consume_delim(hndl_swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+	/*
+	 * Codec config header must be read for size delimited data (HEVC)
+	 * to get to the start of each unit.
+	 * This parsing follows section 8.3.3.1.2 of ISO/IEC 14496-15:2013.
+	 */
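+	/* Skip the first 21 bytes (version, profile/tier/level and format fields). */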
+	swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+	swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+	swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+	swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+	swsr_read_bits(hndl_swsr_ctx, 8 * 4);
+	swsr_read_bits(hndl_swsr_ctx, 8);
+
+	*delim_length = ((swsr_read_bits(hndl_swsr_ctx, 8) & 0x3) + 1) * 8;
+	*unit_array_count = swsr_read_bits(hndl_swsr_ctx, 8);
+
+	/* Size delimiter is only 2 bytes for HEVC codec configuration. */
+	*size_delim_length = 2 * 8;
+}
+
+static void bspp_hevc_update_unitcounts(void *hndl_swsr_ctx, unsigned int *unit_count,
+					unsigned int *unit_array_count)
+{
+	if (*unit_array_count != 0) {
+		unsigned long long value = 3;
+
+		if (*unit_count == 0) {
+			/*
+			 * Set the shift-register up to provide next 3 bytes
+			 * without emulation prevention detection.
+			 */
+			swsr_consume_delim(hndl_swsr_ctx, SWSR_EMPREVENT_NONE, 0, &value);
+
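+			/*
+			 * Each array entry starts with 1 byte for the NAL unit
+			 * type and 2 bytes for the number of units in the array.
+			 */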
+			swsr_read_bits(hndl_swsr_ctx, 8);
+			*unit_count = swsr_read_bits(hndl_swsr_ctx, 16);
+
+			(*unit_array_count)--;
+			(*unit_count)--;
+		}
+	}
+}
+
+void bspp_hevc_determine_unittype(unsigned char bitstream_unittype,
+				  int disable_mvc,
+				  enum bspp_unit_type *bspp_unittype)
+{
+	/* 6 bits for NAL Unit Type in HEVC */
+	unsigned char type = (bitstream_unittype >> 1) & 0x3f;
+
+	switch (type) {
+	case HEVC_NALTYPE_VPS:
+		*bspp_unittype = BSPP_UNIT_VPS;
+		break;
+
+	case HEVC_NALTYPE_SPS:
+		*bspp_unittype = BSPP_UNIT_SEQUENCE;
+		break;
+
+	case HEVC_NALTYPE_PPS:
+		*bspp_unittype = BSPP_UNIT_PPS;
+		break;
+
+	case HEVC_NALTYPE_TRAIL_N:
+	case HEVC_NALTYPE_TRAIL_R:
+	case HEVC_NALTYPE_TSA_N:
+	case HEVC_NALTYPE_TSA_R:
+	case HEVC_NALTYPE_STSA_N:
+	case HEVC_NALTYPE_STSA_R:
+	case HEVC_NALTYPE_RADL_N:
+	case HEVC_NALTYPE_RADL_R:
+	case HEVC_NALTYPE_RASL_N:
+	case HEVC_NALTYPE_RASL_R:
+	case HEVC_NALTYPE_BLA_W_LP:
+	case HEVC_NALTYPE_BLA_W_RADL:
+	case HEVC_NALTYPE_BLA_N_LP:
+	case HEVC_NALTYPE_IDR_W_RADL:
+	case HEVC_NALTYPE_IDR_N_LP:
+	case HEVC_NALTYPE_CRA:
+	case HEVC_NALTYPE_EOS:
+		/* Attach EOS to picture data, so it can be detected in FW */
+		*bspp_unittype = BSPP_UNIT_PICTURE;
+		break;
+
+	case HEVC_NALTYPE_AUD:
+	case HEVC_NALTYPE_PREFIX_SEI:
+	case HEVC_NALTYPE_SUFFIX_SEI:
+	case HEVC_NALTYPE_EOB:
+	case HEVC_NALTYPE_FD:
+		*bspp_unittype = BSPP_UNIT_NON_PICTURE;
+		break;
+
+	default:
+		*bspp_unittype = BSPP_UNIT_UNSUPPORTED;
+		break;
+	}
+}
+
+int bspp_hevc_set_parser_config(enum vdec_bstr_format bstr_format,
+				struct bspp_vid_std_features *pvidstd_features,
+				struct bspp_swsr_ctx *pswsr_ctx,
+				struct bspp_parser_callbacks *parser_callbacks,
+				struct bspp_inter_pict_data *pinterpict_data)
+{
+	/* set HEVC parser callbacks. */
+	parser_callbacks->parse_unit_cb         = bspp_hevc_unitparser;
+	parser_callbacks->release_data_cb       = bspp_hevc_releasedata;
+	parser_callbacks->reset_data_cb         = bspp_hevc_resetdata;
+	parser_callbacks->parse_codec_config_cb = bspp_hevc_parse_codec_config;
+	parser_callbacks->update_unit_counts_cb = bspp_hevc_update_unitcounts;
+	parser_callbacks->initialise_parsing_cb = bspp_hevc_initialiseparsing;
+	parser_callbacks->finalise_parsing_cb   = bspp_hevc_finaliseparsing;
+
+	/* Set HEVC specific features. */
+	pvidstd_features->seq_size = sizeof(struct bspp_hevc_sequ_hdr_info);
+	pvidstd_features->uses_vps  = 1;
+	pvidstd_features->vps_size = sizeof(struct bspp_hevc_vps);
+	pvidstd_features->uses_pps  = 1;
+	pvidstd_features->pps_size = sizeof(struct bspp_hevc_pps);
+
+	/* Set HEVC specific shift register config. */
+	pswsr_ctx->emulation_prevention = SWSR_EMPREVENT_00000300;
+
+	if (bstr_format == VDEC_BSTRFORMAT_DEMUX_BYTESTREAM ||
+	    bstr_format == VDEC_BSTRFORMAT_ELEMENTARY) {
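+		/* Annex B byte streams use 3-byte 0x000001 start-code delimiters */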
+		pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SCP;
+		pswsr_ctx->sr_config.delim_length = 3 * 8;
+		pswsr_ctx->sr_config.scp_value = 0x000001;
+	} else if (bstr_format == VDEC_BSTRFORMAT_DEMUX_SIZEDELIMITED) {
+		pswsr_ctx->sr_config.delim_type = SWSR_DELIM_SIZE;
+		pswsr_ctx->sr_config.delim_length = 4 * 8;
+	} else {
+		return IMG_ERROR_NOT_SUPPORTED;
+	}
+
+	return 0;
+}
diff --git a/drivers/staging/media/vxd/decoder/hevc_secure_parser.h b/drivers/staging/media/vxd/decoder/hevc_secure_parser.h
new file mode 100644
index 000000000000..72424e8b8041
--- /dev/null
+++ b/drivers/staging/media/vxd/decoder/hevc_secure_parser.h
@@ -0,0 +1,455 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEVC secure data unit parsing API.
+ *
+ * Copyright (c) Imagination Technologies Ltd.
+ * Copyright (c) 2021 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Authors:
+ *	Angela Stegmaier <angelabaker@ti.com>
+ *
+ * Re-written for upstreaming
+ *	Sidraya Jayagond <sidraya.bj@pathpartnertech.com>
+ */
+#ifndef __HEVCSECUREPARSER_H__
+#define __HEVCSECUREPARSER_H__
+
+#include "bspp_int.h"
+
+#define HEVC_MAX_NUM_PROFILE_IDC        (32)
+#define HEVC_MAX_NUM_SUBLAYERS          (7)
+#define HEVC_MAX_VPS_OP_SETS_PLUS1      (1024)
+#define HEVC_MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1   (1)
+#define HEVC_MAX_NUM_REF_PICS           (16)
+#define HEVC_MAX_NUM_ST_REF_PIC_SETS    (65)
+#define HEVC_MAX_NUM_LT_REF_PICS        (32)
+#define HEVC_MAX_NUM_REF_IDX_ACTIVE     (15)
+#define HEVC_LEVEL_IDC_MIN              (30)
+#define HEVC_LEVEL_IDC_MAX              (186)
+#define HEVC_1_0_PROFILE_IDC_MAX        (3)
+#define HEVC_MAX_CPB_COUNT              (32)
+#define HEVC_MIN_CODED_UNIT_SIZE        (8)
+
+/* hevc scaling lists (all values are maximum possible ones) */
+#define HEVC_SCALING_LIST_NUM_SIZES     (4)
+#define HEVC_SCALING_LIST_NUM_MATRICES  (6)
+#define HEVC_SCALING_LIST_MATRIX_SIZE   (64)
+
+#define HEVC_MAX_TILE_COLS              (20)
+#define HEVC_MAX_TILE_ROWS              (22)
+
+#define HEVC_EXTENDED_SAR               (255)
+
+#define HEVC_MAX_CHROMA_QP              (6)
+
+enum hevc_nalunittype {
+	HEVC_NALTYPE_TRAIL_N        = 0,
+	HEVC_NALTYPE_TRAIL_R        = 1,
+	HEVC_NALTYPE_TSA_N          = 2,
+	HEVC_NALTYPE_TSA_R          = 3,
+	HEVC_NALTYPE_STSA_N         = 4,
+	HEVC_NALTYPE_STSA_R         = 5,
+	HEVC_NALTYPE_RADL_N         = 6,
+	HEVC_NALTYPE_RADL_R         = 7,
+	HEVC_NALTYPE_RASL_N         = 8,
+	HEVC_NALTYPE_RASL_R         = 9,
+	HEVC_NALTYPE_RSV_VCL_N10    = 10,
+	HEVC_NALTYPE_RSV_VCL_R11    = 11,
+	HEVC_NALTYPE_RSV_VCL_N12    = 12,
+	HEVC_NALTYPE_RSV_VCL_R13    = 13,
+	HEVC_NALTYPE_RSV_VCL_N14    = 14,
+	HEVC_NALTYPE_RSV_VCL_R15    = 15,
+	HEVC_NALTYPE_BLA_W_LP       = 16,
+	HEVC_NALTYPE_BLA_W_RADL     = 17,
+	HEVC_NALTYPE_BLA_N_LP       = 18,
+	HEVC_NALTYPE_IDR_W_RADL     = 19,
+	HEVC_NALTYPE_IDR_N_LP       = 20,
+	HEVC_NALTYPE_CRA            = 21,
+	HEVC_NALTYPE_RSV_IRAP_VCL22 = 22,
+	HEVC_NALTYPE_RSV_IRAP_VCL23 = 23,
+	HEVC_NALTYPE_VPS            = 32,
+	HEVC_NALTYPE_SPS            = 33,
+	HEVC_NALTYPE_PPS            = 34,
+	HEVC_NALTYPE_AUD            = 35,
+	HEVC_NALTYPE_EOS            = 36,
+	HEVC_NALTYPE_EOB            = 37,
+	HEVC_NALTYPE_FD             = 38,
+	HEVC_NALTYPE_PREFIX_SEI     = 39,
+	HEVC_NALTYPE_SUFFIX_SEI     = 40,
+	HEVC_NALTYPE_FORCE32BITS    = 0x7FFFFFFFU
+};
+
+enum bspp_hevcslicetype {
+	HEVC_SLICE_B           = 0,
+	HEVC_SLICE_P           = 1,
+	HEVC_SLICE_I           = 2,
+	HEVC_SLICE_FORCE32BITS = 0x7FFFFFFFU
+};
+
+/* HEVC NAL unit header */
+struct bspp_hevcnalheader {
+	unsigned char nal_unit_type;
+	unsigned char nuh_layer_id;
+	unsigned char nuh_temporal_id_plus1;
+};
+
+/* HEVC video profile_tier_level */
+struct bspp_hevc_profile_tierlevel {
+	unsigned char general_profile_space;
+	unsigned char general_tier_flag;
+	unsigned char general_profile_idc;
+	unsigned char general_profile_compatibility_flag[HEVC_MAX_NUM_PROFILE_IDC];
+	unsigned char general_progressive_source_flag;
+	unsigned char general_interlaced_source_flag;
+	unsigned char general_non_packed_constraint_flag;
+	unsigned char general_frame_only_constraint_flag;
+	unsigned char general_max_12bit_constraint_flag;
+	unsigned char general_max_10bit_constraint_flag;
+	unsigned char general_max_8bit_constraint_flag;
+	unsigned char general_max_422chroma_constraint_flag;
+	unsigned char general_max_420chroma_constraint_flag;
+	unsigned char general_max_monochrome_constraint_flag;
+	unsigned char general_intra_constraint_flag;
+	unsigned char general_one_picture_only_constraint_flag;
+	unsigned char general_lower_bit_rate_constraint_flag;
+	unsigned char general_level_idc;
+	unsigned char sub_layer_profile_present_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_level_present_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_profile_space[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_tier_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_profile_idc[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_profile_compatibility_flag[HEVC_MAX_NUM_SUBLAYERS -
+						   1][HEVC_MAX_NUM_PROFILE_IDC];
+	unsigned char sub_layer_progressive_source_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_interlaced_source_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_non_packed_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_frame_only_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_12bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_10bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_8bit_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_422chroma_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_420chroma_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_max_monochrome_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_intra_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_one_picture_only_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_lower_bit_rate_constraint_flag[HEVC_MAX_NUM_SUBLAYERS - 1];
+	unsigned char sub_layer_level_idc[HEVC_MAX_NUM_SUBLAYERS - 1];
+};
+
+/* HEVC sub layer HRD parameters */
+struct bspp_hevc_sublayer_hrd_parameters {
+	unsigned char bit_rate_value_minus1[HEVC_MAX_CPB_COUNT];
+	unsigned char cpb_size_value_minus1[HEVC_MAX_CPB_COUNT];
+	unsigned char cpb_size_du_value_minus1[HEVC_MAX_CPB_COUNT];
+	unsigned char bit_rate_du_value_minus1[HEVC_MAX_CPB_COUNT];
+	unsigned char cbr_flag[HEVC_MAX_CPB_COUNT];
+};
+
+/* HEVC HRD parameters */
+struct bspp_hevc_hrd_parameters {
+	unsigned char nal_hrd_parameters_present_flag;
+	unsigned char vcl_hrd_parameters_present_flag;
+	unsigned char sub_pic_hrd_params_present_flag;
+	unsigned char tick_divisor_minus2;
+	unsigned char du_cpb_removal_delay_increment_length_minus1;
+	unsigned char sub_pic_cpb_params_in_pic_timing_sei_flag;
+	unsigned char dpb_output_delay_du_length_minus1;
+	unsigned char bit_rate_scale;
+	unsigned char cpb_size_scale;
+	unsigned char cpb_size_du_scale;
+	unsigned char initial_cpb_removal_delay_length_minus1;
+	unsigned char au_cpb_removal_delay_length_minus1;
+	unsigned char dpb_output_delay_length_minus1;
+	unsigned char fixed_pic_rate_general_flag[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char fixed_pic_rate_within_cvs_flag[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char elemental_duration_in_tc_minus1[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char low_delay_hrd_flag[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char cpb_cnt_minus1[HEVC_MAX_NUM_SUBLAYERS];
+	struct bspp_hevc_sublayer_hrd_parameters sublayhrdparams[HEVC_MAX_NUM_SUBLAYERS];
+};
+
+/* HEVC video parameter set */
+struct bspp_hevc_vps {
+	unsigned char is_different;
+	unsigned char is_sent;
+	unsigned char is_available;
+	unsigned char vps_video_parameter_set_id;
+	unsigned char vps_reserved_three_2bits;
+	unsigned char vps_max_layers_minus1;
+	unsigned char vps_max_sub_layers_minus1;
+	unsigned char vps_temporal_id_nesting_flag;
+	unsigned short vps_reserved_0xffff_16bits;
+	struct bspp_hevc_profile_tierlevel profiletierlevel;
+	unsigned char vps_max_dec_pic_buffering_minus1[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char vps_max_num_reorder_pics[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char vps_max_latency_increase_plus1[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char vps_sub_layer_ordering_info_present_flag;
+	unsigned char vps_max_layer_id;
+	unsigned char vps_num_layer_sets_minus1;
+	unsigned char layer_id_included_flag[HEVC_MAX_VPS_OP_SETS_PLUS1]
+		[HEVC_MAX_VPS_NUH_RESERVED_ZERO_LAYER_ID_PLUS1];
+	unsigned char vps_timing_info_present_flag;
+	unsigned int vps_num_units_in_tick;
+	unsigned int vps_time_scale;
+	unsigned char vps_poc_proportional_to_timing_flag;
+	unsigned char vps_num_ticks_poc_diff_one_minus1;
+	unsigned char vps_num_hrd_parameters;
+	unsigned char *hrd_layer_set_idx;
+	unsigned char *cprms_present_flag;
+	unsigned char vps_extension_flag;
+	unsigned char vps_extension_data_flag;
+};
+
+/* HEVC scaling lists */
+struct bspp_hevc_scalinglist_data {
+	unsigned char dccoeffs[HEVC_SCALING_LIST_NUM_SIZES - 2][HEVC_SCALING_LIST_NUM_MATRICES];
+	unsigned char lists[HEVC_SCALING_LIST_NUM_SIZES][HEVC_SCALING_LIST_NUM_MATRICES]
+		[HEVC_SCALING_LIST_MATRIX_SIZE];
+};
+
+/* HEVC short term reference picture set */
+struct bspp_hevc_shortterm_refpicset {
+	unsigned char num_negative_pics;
+	unsigned char num_positive_pics;
+	short delta_poc_s0[HEVC_MAX_NUM_REF_PICS];
+	short delta_poc_s1[HEVC_MAX_NUM_REF_PICS];
+	unsigned char used_bycurr_pic_s0[HEVC_MAX_NUM_REF_PICS];
+	unsigned char used_bycurr_pic_s1[HEVC_MAX_NUM_REF_PICS];
+	unsigned char num_delta_pocs;
+};
+
+/* HEVC video usability information */
+struct bspp_hevc_vui_params {
+	unsigned char aspect_ratio_info_present_flag;
+	unsigned char aspect_ratio_idc;
+	unsigned short sar_width;
+	unsigned short sar_height;
+	unsigned char overscan_info_present_flag;
+	unsigned char overscan_appropriate_flag;
+	unsigned char video_signal_type_present_flag;
+	unsigned char video_format;
+	unsigned char video_full_range_flag;
+	unsigned char colour_description_present_flag;
+	unsigned char colour_primaries;
+	unsigned char transfer_characteristics;
+	unsigned char matrix_coeffs;
+	unsigned char chroma_loc_info_present_flag;
+	unsigned char chroma_sample_loc_type_top_field;
+	unsigned char chroma_sample_loc_type_bottom_field;
+	unsigned char neutral_chroma_indication_flag;
+	unsigned char field_seq_flag;
+	unsigned char frame_field_info_present_flag;
+	unsigned char default_display_window_flag;
+	unsigned short def_disp_win_left_offset;
+	unsigned short def_disp_win_right_offset;
+	unsigned short def_disp_win_top_offset;
+	unsigned short def_disp_win_bottom_offset;
+	unsigned char vui_timing_info_present_flag;
+	unsigned int vui_num_units_in_tick;
+	unsigned int vui_time_scale;
+	unsigned char vui_poc_proportional_to_timing_flag;
+	unsigned int vui_num_ticks_poc_diff_one_minus1;
+	unsigned char vui_hrd_parameters_present_flag;
+	struct bspp_hevc_hrd_parameters vui_hrd_params;
+	unsigned char bitstream_restriction_flag;
+	unsigned char tiles_fixed_structure_flag;
+	unsigned char motion_vectors_over_pic_boundaries_flag;
+	unsigned char restricted_ref_pic_lists_flag;
+	unsigned short min_spatial_segmentation_idc;
+	unsigned char max_bytes_per_pic_denom;
+	unsigned char max_bits_per_min_cu_denom;
+	unsigned char log2_max_mv_length_horizontal;
+	unsigned char log2_max_mv_length_vertical;
+};
+
+/* HEVC sps range extensions */
+struct bspp_hevc_sps_range_exts {
+	unsigned char transform_skip_rotation_enabled_flag;
+	unsigned char transform_skip_context_enabled_flag;
+	unsigned char implicit_rdpcm_enabled_flag;
+	unsigned char explicit_rdpcm_enabled_flag;
+	unsigned char extended_precision_processing_flag;
+	unsigned char intra_smoothing_disabled_flag;
+	unsigned char high_precision_offsets_enabled_flag;
+	unsigned char persistent_rice_adaptation_enabled_flag;
+	unsigned char cabac_bypass_alignment_enabled_flag;
+};
+
+/* HEVC sequence parameter set */
+struct bspp_hevc_sps {
+	unsigned char is_different;
+	unsigned char is_sent;
+	unsigned char is_available;
+	unsigned char sps_video_parameter_set_id;
+	unsigned char sps_max_sub_layers_minus1;
+	unsigned char sps_temporal_id_nesting_flag;
+	struct bspp_hevc_profile_tierlevel profile_tier_level;
+	unsigned char sps_seq_parameter_set_id;
+	unsigned char chroma_format_idc;
+	unsigned char separate_colour_plane_flag;
+	unsigned int pic_width_in_luma_samples;
+	unsigned int pic_height_in_luma_samples;
+	unsigned char conformance_window_flag;
+	unsigned short conf_win_left_offset;
+	unsigned short conf_win_right_offset;
+	unsigned short conf_win_top_offset;
+	unsigned short conf_win_bottom_offset;
+	unsigned char bit_depth_luma_minus8;
+	unsigned char bit_depth_chroma_minus8;
+	unsigned char log2_max_pic_order_cnt_lsb_minus4;
+	unsigned char sps_sub_layer_ordering_info_present_flag;
+	unsigned char sps_max_dec_pic_buffering_minus1[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char sps_max_num_reorder_pics[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned int sps_max_latency_increase_plus1[HEVC_MAX_NUM_SUBLAYERS];
+	unsigned char log2_min_luma_coding_block_size_minus3;
+	unsigned char log2_diff_max_min_luma_coding_block_size;
+	unsigned char log2_min_transform_block_size_minus2;
+	unsigned char log2_diff_max_min_transform_block_size;
+	unsigned char max_transform_hierarchy_depth_inter;
+	unsigned char max_transform_hierarchy_depth_intra;
+	unsigned char scaling_list_enabled_flag;
+	unsigned char sps_scaling_list_data_present_flag;
+	struct bspp_hevc_scalinglist_data scalinglist_data;
+	unsigned char amp_enabled_flag;
+	unsigned char sample_adaptive_offset_enabled_flag;
+	unsigned char pcm_enabled_flag;
+	unsigned char pcm_sample_bit_depth_luma_minus1;
+	unsigned char pcm_sample_bit_depth_chroma_minus1;
+	unsigned char log2_min_pcm_luma_coding_block_size_minus3;
+	unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
+	unsigned char pcm_loop_filter_disabled_flag;
+	unsigned char num_short_term_ref_pic_sets;
+	struct bspp_hevc_shortterm_refpicset rps_list[HEVC_MAX_NUM_ST_REF_PIC_SETS];
+	unsigned char long_term_ref_pics_present_flag;
+	unsigned char num_long_term_ref_pics_sps;
+	unsigned short lt_ref_pic_poc_lsb_sps[HEVC_MAX_NUM_LT_REF_PICS];
+	unsigned char used_by_curr_pic_lt_sps_flag[HEVC_MAX_NUM_LT_REF_PICS];
+	unsigned char sps_temporal_mvp_enabled_flag;
+	unsigned char strong_intra_smoothing_enabled_flag;
+	unsigned char vui_parameters_present_flag;
+	struct bspp_hevc_vui_params vui_params;
+	unsigned char sps_extension_present_flag;
+	unsigned char sps_range_extensions_flag;
+	struct bspp_hevc_sps_range_exts range_exts;
+	unsigned char sps_extension_7bits;
+	unsigned char sps_extension_data_flag;
+	/* derived elements */
+	unsigned char sub_width_c;
+	unsigned char sub_height_c;
+	unsigned char ctb_log2size_y;
+	unsigned char ctb_size_y;
+	unsigned int pic_width_in_ctbs_y;
+	unsigned int pic_height_in_ctbs_y;
+	unsigned int pic_size_in_ctbs_y;
+	int max_pic_order_cnt_lsb;
+	unsigned int sps_max_latency_pictures[HEVC_MAX_NUM_SUBLAYERS];
+	/* raw vui data as extracted from bitstream. */
+	struct bspp_raw_bitstream_data *vui_raw_data;
+};
+
+/**
+ * struct bspp_hevc_sequ_hdr_info - HEVC sequence header information
+ *					(VPS, SPS, VUI); contains everything
+ *					parsed from the video/sequence header.
+ * @vps: parsed video parameter set
+ * @sps: parsed sequence parameter set (including VUI)
+ */
+struct bspp_hevc_sequ_hdr_info {
+	struct bspp_hevc_vps vps;
+	struct bspp_hevc_sps sps;
+};
+
+/* HEVC pps range extensions */
+struct bspp_hevc_pps_range_exts {
+	unsigned char log2_max_transform_skip_block_size_minus2;
+	unsigned char cross_component_prediction_enabled_flag;
+	unsigned char chroma_qp_offset_list_enabled_flag;
+	unsigned char diff_cu_chroma_qp_offset_depth;
+	unsigned char chroma_qp_offset_list_len_minus1;
+	unsigned char cb_qp_offset_list[HEVC_MAX_CHROMA_QP];
+	unsigned char cr_qp_offset_list[HEVC_MAX_CHROMA_QP];
+	unsigned char log2_sao_offset_scale_luma;
+	unsigned char log2_sao_offset_scale_chroma;
+};
+
+/* HEVC picture parameter set */
+struct bspp_hevc_pps {
+	unsigned char is_available;
+	unsigned char is_param_copied;
+	unsigned char pps_pic_parameter_set_id;
+	unsigned char pps_seq_parameter_set_id;
+	unsigned char dependent_slice_segments_enabled_flag;
+	unsigned char output_flag_present_flag;
+	unsigned char num_extra_slice_header_bits;
+	unsigned char sign_data_hiding_enabled_flag;
+	unsigned char cabac_init_present_flag;
+	unsigned char num_ref_idx_l0_default_active_minus1;
+	unsigned char num_ref_idx_l1_default_active_minus1;
+	unsigned char init_qp_minus26;
+	unsigned char constrained_intra_pred_flag;
+	unsigned char transform_skip_enabled_flag;
+	unsigned char cu_qp_delta_enabled_flag;
+	unsigned char diff_cu_qp_delta_depth;
+	int pps_cb_qp_offset;
+	int pps_cr_qp_offset;
+	unsigned char pps_slice_chroma_qp_offsets_present_flag;
+	unsigned char weighted_pred_flag;
+	unsigned char weighted_bipred_flag;
+	unsigned char transquant_bypass_enabled_flag;
+	unsigned char tiles_enabled_flag;
+	unsigned char entropy_coding_sync_enabled_flag;
+	unsigned char num_tile_columns_minus1;
+	unsigned char num_tile_rows_minus1;
+	unsigned char uniform_spacing_flag;
+	unsigned char column_width_minus1[HEVC_MAX_TILE_COLS];
+	unsigned char row_height_minus1[HEVC_MAX_TILE_ROWS];
+	unsigned char loop_filter_across_tiles_enabled_flag;
+	unsigned char pps_loop_filter_across_slices_enabled_flag;
+	unsigned char deblocking_filter_control_present_flag;
+	unsigned char deblocking_filter_override_enabled_flag;
+	unsigned char pps_deblocking_filter_disabled_flag;
+	unsigned char pps_beta_offset_div2;
+	unsigned char pps_tc_offset_div2;
+	unsigned char pps_scaling_list_data_present_flag;
+	struct bspp_hevc_scalinglist_data scaling_list;
+	unsigned char lists_modification_present_flag;
+	unsigned char log2_parallel_merge_level_minus2;
+	unsigned char slice_segment_header_extension_present_flag;
+	unsigned char pps_extension_present_flag;
+	unsigned char pps_range_extensions_flag;
+	struct bspp_hevc_pps_range_exts range_exts