This page complements UG1186, the "LibMetal and OpenAMP User Guide", for Zynq-7000 and Zynq UltraScale+ MPSoC.

Getting Started with the Pre-Built Images

Here are the basic steps to boot Linux and run an OpenAMP application using the pre-built images.

For example, for the ZCU102:
The echo-test application sends packets from Linux, running on the quad-core Cortex-A53, to a single Cortex-R5 core running FreeRTOS, which echoes them back.
Note: Alternatively, if you have already created a PetaLinux project from a provided BSP for your board, pre-built images can also be found under the <your project>/pre-built/linux/images/ directory.
Note: As an alternative to the SD-card boot steps above, you can boot the board via JTAG. For this you need a JTAG cable connected, JTAG drivers installed, and a PetaLinux project created from a provided BSP. Go into the <your project>/pre-built/linux/images directory, replace system.dtb with openamp.dtb, and then enter: "petalinux-boot --jtag --prebuilt 3"
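A minimal sketch of that JTAG boot sequence, using the paths from the note above:

      cd <your project>/pre-built/linux/images
      cp openamp.dtb system.dtb        # use the OpenAMP device tree for this boot
      petalinux-boot --jtag --prebuilt 3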

Docs and source code

Documents


URLs to source code


Xilinx OpenAMP and Libmetal related code

The following locations provide access to the code:

Additional examples

ZynqMP Linux Master running on APU with RPMsg in kernel space and one RPU slave.

When running with the RPUs in split mode and only one RPU as an OpenAMP slave, the second RPU can still run another, non-OpenAMP application.

Firmware:

Device Tree:

/ {
	reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;
		rpu0vdev0vring0: rpu0vdev0vring0@3ed40000 {
			no-map;
			reg = <0x0 0x3ed40000 0x0 0x4000>;
		};
		rpu0vdev0vring1: rpu0vdev0vring1@3ed44000 {
			no-map;
			reg = <0x0 0x3ed44000 0x0 0x4000>;
		};
		rpu0vdev0buffer: rpu0vdev0buffer@3ed48000 {
			no-map;
			reg = <0x0 0x3ed48000 0x0 0x100000>;
		};
		rproc_0_reserved: rproc@3ed00000 {
			no-map;
			reg = <0x0 0x3ed00000 0x0 0x40000>;
		};
	};
 
	zynqmp-rpu {
		compatible = "xlnx,zynqmp-r5-remoteproc-1.0";
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;
		core_conf = "split";
		reg = <0x0 0xFF9A0000 0x0 0x10000>;
		r5_0: r5@0 {
			#address-cells = <2>;
			#size-cells = <2>;
			ranges;
			memory-region = <&rproc_0_reserved>, <&rpu0vdev0buffer>, <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
			pnode-id = <0x7>;
			mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
			mbox-names = "tx", "rx";
			tcm_0_a: tcm_0@0 {
				reg = <0x0 0xFFE00000 0x0 0x10000>;
				pnode-id = <0xf>;
			};
			tcm_0_b: tcm_0@1 {
				reg = <0x0 0xFFE20000 0x0 0x10000>;
				pnode-id = <0x10>;
			};
		};
		/* If using RPU1 instead, use the following:
    	r5_1: r5@1 {
        	#address-cells = <2>;
        	#size-cells = <2>;
        	ranges;
        	memory-region = <&rproc_0_reserved>, <&rpu0vdev0buffer>,
                <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
        	pnode-id = <0x8>;
        	mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
        	mbox-names = "tx", "rx";
        	tcm_a: tcm@0 {
            	reg = <0x0 0xFFE90000 0x0 0x10000>;
            	pnode-id = <0x11>;
        	};
        	tcm_b: tcm@1 {
            	reg = <0x0 0xFFEB0000 0x0 0x10000>;
            	pnode-id = <0x12>;
        	};
    	};
		*/
	};


	zynqmp_ipi1 {
		compatible = "xlnx,zynqmp-ipi-mailbox";
		interrupt-parent = <&gic>;
		interrupts = <0 29 4>;
		xlnx,ipi-id = <7>;
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		/* APU<->RPU0 IPI mailbox controller */
		ipi_mailbox_rpu0: mailbox@ff990600 {
			reg = <0xff990600 0x20>,
			      <0xff990620 0x20>,
			      <0xff9900c0 0x20>,
			      <0xff9900e0 0x20>;
			reg-names = "local_request_region",
				    "local_response_region",
				    "remote_request_region",
				    "remote_response_region";
			#mbox-cells = <1>;
			xlnx,ipi-id = <1>;
		};
	};
};
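With the firmware built and this device tree applied, the R5 is controlled from Linux through the remoteproc sysfs interface. A minimal sketch of loading and starting the firmware from the Linux shell (<r5 firmware> is a placeholder for an ELF placed under /lib/firmware):

      ls /sys/class/remoteproc/                          # one remoteproc instance per R5 core
      echo <r5 firmware> > /sys/class/remoteproc/remoteproc0/firmware
      echo start > /sys/class/remoteproc/remoteproc0/state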


ZynqMP RPU to manage Linux

NOTE: RPU slave applications are currently only supported to run from TCM by default. In practice this means that for one RPU to load and start the other RPU, the entire slave application must be loaded into, and run from, TCM. An APU remoteproc slave does support running its application from DDR.

  1. Create the R5-0 standalone BSP
  2. Build libmetal for R5 standalone
    1. Below is a sample toolchain file


      # Target processor and Xilinx machine name
      set (CMAKE_SYSTEM_PROCESSOR "arm"        CACHE STRING "")
      set (MACHINE                "zynqmp_r5"  CACHE STRING "")

      # Bare-metal R5 cross toolchain; point -I at the generated BSP headers
      set (CROSS_PREFIX           "armr5-none-eabi-" CACHE STRING "")
      set (CMAKE_C_FLAGS          "-mfloat-abi=hard -mcpu=cortex-r5 -mfpu=vfpv3-d16 -Wall -Werror -Wextra \
         -flto -Os -I<path to bsp>/bsp/psu_cortexr5_0/include" CACHE STRING "")

      # BSP libraries to link against
      link_directories(
      <path to bsp>/bsp/psu_cortexr5_0/lib
      )
      set (PLATFORM_LIB_DEPS      "-lxil -lxilstandalone -lc -lm -lxilpm" CACHE STRING "")

      # gcc-ar keeps LTO information in the static archive
      SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
      SET(CMAKE_AR  "gcc-ar" CACHE STRING "")
      SET(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
      SET(CMAKE_C_ARCHIVE_FINISH true)
      include (cross-generic-gcc)


    2. Script to build libmetal

      git clone https://github.com/OpenAMP/libmetal.git
      cd libmetal

      mkdir build_r5
      cd build_r5
      cmake .. -DCMAKE_TOOLCHAIN_FILE=<toolchain_file> \
      -DCMAKE_LIBRARY_PATH=<path to bsp>/bsp/psu_cortexr5_0/lib

      # stage headers and libraries under build_r5/usr/local
      make DESTDIR=. install VERBOSE=1


  3. Build OpenAMP for R5 standalone


    1. Toolchain file for OpenAMP

      set (CMAKE_SYSTEM_PROCESSOR "arm"        CACHE STRING "")
      set (MACHINE                "zynqmp_r5"  CACHE STRING "")
      set (CROSS_PREFIX           "armr5-none-eabi-" CACHE STRING "")
      # Point -I at the staged libmetal headers and the BSP headers
      set (CMAKE_C_FLAGS          "-mfloat-abi=hard -mcpu=cortex-r5 -Os -flto -mfpu=vfpv3-d16 -DUNDEFINE_FILE_OPS \
      -I<path to libmetal repo>/libmetal/build_r5/usr/local/include \
      -I<bsp path>/bsp/psu_cortexr5_0/include" CACHE STRING "")
      set (CMAKE_ASM_FLAGS        "-mcpu=cortex-r5" CACHE STRING "")
      set (PLATFORM_LIB_DEPS      "-lxil -lxilstandalone -lxilpm -lxilmem -lc -lm" CACHE STRING "")
      SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
      SET(CMAKE_AR  "gcc-ar" CACHE STRING "")
      SET(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")
      SET(CMAKE_C_ARCHIVE_FINISH true)
      link_directories(
      <path to libmetal repo>/libmetal/build_r5/usr/local/lib/
      )
      # Build the remoteproc elf-load (load firmware) demo
      set (WITH_LOAD_FW ON)
      set (CMAKE_FIND_ROOT_PATH <path to libmetal repo>/libmetal/build_r5/usr/local/lib <bsp path>/bsp/psu_cortexr5_0/lib)
      include (cross_generic_gcc)
      string (TOLOWER "FreeRTOS" PROJECT_SYSTEM)
      string (TOUPPER "FreeRTOS" PROJECT_SYSTEM_UPPER)
      # vim: expandtab:ts=2:sw=2:smartindent


    2. Script using the toolchain file for OpenAMP

      git clone https://github.com/OpenAMP/open-amp.git
      cd open-amp
      mkdir build
      cd build
      cmake .. \
       -DCMAKE_TOOLCHAIN_FILE=<toolchain_file> \
       -DCMAKE_INCLUDE_PATH="<path to libmetal repo>/libmetal/build_r5/usr/local/include/;<path to bsp>/bsp/psu_cortexr5_0/include/" \
       -DCMAKE_LIBRARY_PATH="<path to libmetal repo>/libmetal/build_r5/usr/local/lib/;<path to bsp>/bsp/psu_cortexr5_0/lib/" \
       -DWITH_APPS=on -DWITH_LOAD_FW=ON
      make DESTDIR=$(pwd) install VERBOSE=1


      Note: how to select the remoteproc elf-load slave:
      1. If RPU1 is the slave, after -DWITH_LOAD_FW=ON add -DLOAD_FW_TARGET=NODE_RPU_1
      2. If the APU is the slave, after -DWITH_LOAD_FW=ON add -DLOAD_FW_TARGET=NODE_APU_1
  4. Boot up to U-Boot on the target ZCU102
    1. In xsdb:
      1. WARNING: do not reset the remoteproc slave processor, as this introduces issues when using the PM library for lifecycle management
      2. Configure the R5s to be in split mode if using a single R5 as the remoteproc slave
      3. Reset the TCM
      4. Load the remoteproc slave application at its base address (default 0x3ed00000)
      5. Load the binary for the R5 remoteproc master (in this case R5-0)
      6. Start R5-0


        ta 6
        # set the R5s to split mode
        mwr 0xFF9A0000 0x08
        # reset the R5 and zero both TCM banks
        ta 7
        rst -processor
        mwr 0xFFE00000 0 10000
        after 1000
        mwr 0xFFE20000 0 10000
        after 1000

        # load the APU slave application at the default base address
        dow -data <a53 app> 0x3ed00000

        # load the R5 remoteproc master firmware
        ta 6
        dow load_fw.out
        # start the R5 master
        con


Versal OpenAMP Demos using RPMsg in kernel-space

  1. Configure PetaLinux to run the demo
    1. Download 2020.1 Versal BSP

  2. Boot on target


    $ petalinux-boot --jtag --prebuilt 3


  3. Running the Demo on Target
    1. After starting the firmware on the target, running the Linux-side application produces the following output:

      $ echo_test -d <rpmsg channel name>
       Echo test start
       Open rpmsg dev /dev/rpmsg0!
       **************************************
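A typical run, assuming the firmware is already present under /lib/firmware and that remoteproc0 maps to the RPU core running the demo (both are assumptions for this sketch):

      echo <rpu firmware> > /sys/class/remoteproc/remoteproc0/firmware
      echo start > /sys/class/remoteproc/remoteproc0/state
      echo_test -d /dev/rpmsg0
      # stop the remote firmware once the test completes
      echo stop > /sys/class/remoteproc/remoteproc0/state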


Versal RPU to manage Linux

Below is an example of using the RPU with OpenAMP to boot Linux on the Versal platform.

  1. Generate linux_boot.elf from the .S file below; the build command follows the listing

    /*
     * Minimal shim that enters the Linux kernel following the AArch64
     * boot protocol: x0 holds the DTB address and x1-x3 are zero.
     *
     * The kernel start address and DTB location can easily be patched at
     * runtime, before jumping to this code snippet, if needed.
     * To build and link at, for example, 0xfffc8000:
     * aarch64-none-elf-gcc -nostartfiles -nodefaultlibs -Wl,--build-id=none,-Ttext=0xfffc8000 linux-boot.S -o linux_boot.elf
     */
            .section        .text
            .global         _start
    _start:
            ldr     x17, kernel_start
            ldr     x0, kernel_dts
            mov     x1, xzr
            mov     x2, xzr
            mov     x3, xzr
            blr     x17
            .balign 8
    kernel_start:
            .dword  0x00080000
    kernel_dts:
            .dword  0x1000
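The build step for the listing above, reusing the command from its header comment (assuming the source is saved as linux-boot.S):

      # build and link the shim at 0xfffc8000
      aarch64-none-elf-gcc -nostartfiles -nodefaultlibs \
          -Wl,--build-id=none,-Ttext=0xfffc8000 linux-boot.S -o linux_boot.elf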


  2. Generate the RPU OpenAMP application to load ATF and boot Linux

  3. Build libmetal for R5 standalone (as in the ZynqMP section above)

  4. Build the OpenAMP load-firmware demo
  5. Build ATF (a sketch follows this list)
  6. Boot Linux on the Versal board with the following sessions for the console and the Xilinx debugger
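For step 5, a minimal sketch of building BL31 from the Xilinx ATF tree (the tag and any release-specific build options are assumptions; check the release notes for your version):

      git clone https://github.com/Xilinx/arm-trusted-firmware.git
      cd arm-trusted-firmware
      # PLAT=versal selects the Versal port; RESET_TO_BL31 makes BL31 the reset entry point
      make CROSS_COMPILE=aarch64-none-elf- PLAT=versal RESET_TO_BL31=1 bl31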



Feature Changes


Module Name                                                                | Change                                          | Link
Xen Dom0 and DomU support for OpenAMP running in RPMsg userspace on Versal | Support for these two configurations in 2020.1  |
RPU as Lifecycle master                                                    |                                                 |