<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed in i.MX Processors</title>
    <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1672958#M207888</link>
    <description>&lt;P&gt;Hi&lt;BR /&gt;&lt;BR /&gt;Sorry for the delayed reply.&lt;BR /&gt;&lt;BR /&gt;I have been reviewing this case, but I couldn't successfully use the Google Colab tool.&lt;BR /&gt;Do you have any updates on your side about the use of the Efficientdet-lite model on the iMX93?&lt;BR /&gt;Could you please share the model to try on my side?&lt;BR /&gt;&lt;BR /&gt;Have a great day!&lt;BR /&gt;&lt;BR /&gt;Best regards, Brian.&lt;/P&gt;</description>
    <pubDate>Tue, 20 Jun 2023 18:35:05 GMT</pubDate>
    <dc:creator>brian14</dc:creator>
    <dc:date>2023-06-20T18:35:05Z</dc:date>
    <item>
      <title>i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1660793#M206868</link>
      <description>&lt;P&gt;Hi,&lt;/P&gt;&lt;P&gt;i am facing an error when trying to infer AI models on the EthosU NPU on the i.MX93 EVKCM.&lt;/P&gt;&lt;P&gt;Used BSP:&amp;nbsp;Linux imx93evk 6.1.1+g29549c7073bf&lt;/P&gt;&lt;P&gt;First i compiled the mobilenet model from the tensorflow-lite example folder:&amp;nbsp;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;root@imx93evk:/usr/bin/tensorflow-lite-2.10.0/examples# vela mobilenet_v1_1.0_224_quant.tflite&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;Output:&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Network summary for mobilenet_v1_1.0_224_quant&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Accelerator configuration Ethos_U65_256&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;System configuration internal-default&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Memory mode internal-default&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Accelerator clock 1000 MHz&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Design peak SRAM bandwidth 16.00 GB/s&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Design peak DRAM bandwidth 3.75 GB/s&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Total SRAM used 370.91 KiB&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total DRAM used 3622.39 KiB&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;CPU operators = 0 (0.0%)&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;NPU operators = 60 (100.0%)&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Average SRAM bandwidth 4.73 GB/s&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Input SRAM bandwidth 11.96 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Weight SRAM bandwidth 9.70 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Output SRAM bandwidth 0.00 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total SRAM bandwidth 21.77 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total SRAM bandwidth per input 21.77 MB/inference (batch 
size 1)&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Average DRAM bandwidth 2.13 GB/s&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Input DRAM bandwidth 1.52 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Weight DRAM bandwidth 3.23 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Output DRAM bandwidth 5.06 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total DRAM bandwidth 9.82 MB/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total DRAM bandwidth per input 9.82 MB/inference (batch size 1)&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Neural network macs 572406226 MACs/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Network Tops/s 0.25 Tops/s&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;NPU cycles 3891214 cycles/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;SRAM Access cycles 1020041 cycles/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;DRAM Access cycles 1677430 cycles/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;On-chip Flash Access cycles 0 cycles/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Off-chip Flash Access cycles 0 cycles/batch&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#808080"&gt;Total cycles 4604278 cycles/batch&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#808080"&gt;Batch Inference time 4.60 ms, 217.19 inferences/s (batch size 1)&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#000000"&gt;Then i tried the tflite label_image example:&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;EM&gt;root@imx93evk:/usr/bin/tensorflow-lite-2.10.0/examples# ./label_image -m mobilenet_v1_1.0_224_quant_vela.tflite external_delegate_path=/usr/lib/libethosu_delegate.so&lt;/EM&gt;&lt;/P&gt;&lt;P&gt;Output:&lt;/P&gt;&lt;P&gt;&lt;FONT color="#FF9900"&gt;INFO: Loaded model mobilenet_v1_1.0_224_quant_vela.tflite&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;INFO: resolved reporter&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT 
color="#FF9900"&gt;ERROR: Ethos_u inference failed&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#FF9900"&gt;ERROR: Node number 0 (ethos-u) failed to invoke.&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;ERROR: Failed to invoke tflite!&amp;nbsp;&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#000000"&gt;I also tried the inference_runner script which throws the following error:&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;&lt;FONT color="#000000"&gt;&lt;EM&gt;./inference_runner -n ./output/mobilenet_v1_1.0_224_quant_vela.tflite -i grace_hopper.bmp -l labels.txt -o output.txt&lt;/EM&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;Send Ping&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;Send version request&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;Send capabilities request&lt;/FONT&gt;&lt;BR /&gt;&lt;FONT color="#FF9900"&gt;Error: IOCTL failed&lt;/FONT&gt;&lt;/FONT&gt;&lt;/P&gt;&lt;P&gt;Any suggestions what i might do wrong?&lt;/P&gt;&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Wed, 31 May 2023 12:21:25 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1660793#M206868</guid>
      <dc:creator>nxf50230</dc:creator>
      <dc:date>2023-05-31T12:21:25Z</dc:date>
    </item>
    <item>
      <title>Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1662850#M207026</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.nxp.com/t5/user/viewprofilepage/user-id/63542"&gt;@nxf50230&lt;/a&gt;,&amp;nbsp;&lt;/P&gt;
&lt;P&gt;I tried to replicate this issue and I could run successfully the example model mobilenet_v1_1.0_224_quant.tflite located at /usr/bin/tensorflow-lite-2.10.0/examples.&lt;/P&gt;
&lt;P&gt;In your example you are using the command:&lt;/P&gt;
&lt;LI-CODE lang="markup"&gt;root@imx93evk:/usr/bin/tensorflow-lite-2.10.0/examples# ./label_image -m mobilenet_v1_1.0_224_quant_vela.tflite external_delegate_path=/usr/lib/libethosu_delegate.so&lt;/LI-CODE&gt;
&lt;P&gt;For the i.MX93 you don’t need to describe an external_delegate_path. The model compiled with vela command is automatically detected and runs on the NPU EthosU.&lt;/P&gt;
&lt;P&gt;Please try with the following command:&lt;/P&gt;
&lt;LI-CODE lang="markup"&gt;root@imx93evk:/usr/bin/tensorflow-lite-2.10.0/examples# ./label_image -m output/mobilenet_v1_1.0_224_quant_vela.tflite -i grace_hopper.bmp -l labels.txt&lt;/LI-CODE&gt;
&lt;P&gt;You will see an output like this:&lt;/P&gt;
&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Brian_Ibarra_0-1685734522182.png" style="width: 705px;"&gt;&lt;img src="https://community.nxp.com/t5/image/serverpage/image-id/226084i6019F0BFC2FFC8F4/image-dimensions/705x145?v=v2" width="705" height="145" role="button" title="Brian_Ibarra_0-1685734522182.png" alt="Brian_Ibarra_0-1685734522182.png" /&gt;&lt;/span&gt;&lt;/P&gt;
&lt;P&gt;Notice that the average inference time is &lt;STRONG&gt;3.885 ms&lt;/STRONG&gt;.&lt;/P&gt;
&lt;P&gt;If we use the mobilenet_v1_1.0_224_quant.tflite without "vela", the model runs on the CPU.&lt;/P&gt;
&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper lia-image-align-inline" image-alt="Brian_Ibarra_1-1685734578678.png" style="width: 707px;"&gt;&lt;img src="https://community.nxp.com/t5/image/serverpage/image-id/226085i599222A4851FA588/image-dimensions/707x145?v=v2" width="707" height="145" role="button" title="Brian_Ibarra_1-1685734578678.png" alt="Brian_Ibarra_1-1685734578678.png" /&gt;&lt;/span&gt;&lt;/P&gt;
&lt;P&gt;Notice the average inference time is &lt;STRONG&gt;135.001 ms&lt;/STRONG&gt;.&lt;/P&gt;
&lt;P&gt;I hope this information will be helpful.&lt;/P&gt;
&lt;P&gt;Have a great day!&lt;/P&gt;</description>
      <pubDate>Fri, 02 Jun 2023 19:37:46 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1662850#M207026</guid>
      <dc:creator>brian14</dc:creator>
      <dc:date>2023-06-02T19:37:46Z</dc:date>
    </item>
    <item>
      <title>Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666080#M207308</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.nxp.com/t5/user/viewprofilepage/user-id/207096"&gt;@brian14&lt;/a&gt;&amp;nbsp;,&lt;/P&gt;
&lt;P&gt;thanks a lot for your support! I can confirm that the mobilenetV1 model works with the ethosu accelerator, with and without specifying the delegate.&lt;/P&gt;
&lt;P&gt;I just discovered that the described error occurred after I tried to run a converted efficientdet-lite2 model (from tensorflow-lite model maker) which doesn't seem to work. So after I ran this model which threw the same error, I also couldn't run the mobilenet models on ethosu anymore. Only a reboot of the board fixed this issue for the mobilenet models.&lt;/P&gt;
&lt;P&gt;I am still wondering why the efficientdet-lite2 model doesn't work because the vela compiler doesn't throw any errors, just warnings of placing unsupported operations on the CPU.&lt;/P&gt;
&lt;P&gt;Do you have any idea why the execution of efficientdet-lite model is throwing this error?&lt;/P&gt;
&lt;P&gt;&amp;nbsp;&lt;/P&gt;</description>
      <pubDate>Thu, 08 Jun 2023 09:44:04 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666080#M207308</guid>
      <dc:creator>nxf50230</dc:creator>
      <dc:date>2023-06-08T09:44:04Z</dc:date>
    </item>
    <item>
      <title>Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666368#M207338</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.nxp.com/t5/user/viewprofilepage/user-id/63542"&gt;@nxf50230&lt;/a&gt;,&amp;nbsp;&lt;/P&gt;
&lt;P&gt;Please help me to clarify your request.&lt;/P&gt;
&lt;P&gt;Are you using TensorFlow lite model maker to train your model? (I'm not familiar with this tool)&lt;/P&gt;
&lt;P&gt;I suspect there could be an error with the training step or with the conversion, you can try to do this conversion using the eIQ Tool from NXP.&lt;/P&gt;
&lt;P&gt;I will try to run this model on i.MX93 and I will update it here as soon as possible.&lt;/P&gt;
&lt;P&gt;Have a great day!&lt;/P&gt;</description>
      <pubDate>Thu, 08 Jun 2023 18:33:40 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666368#M207338</guid>
      <dc:creator>brian14</dc:creator>
      <dc:date>2023-06-08T18:33:40Z</dc:date>
    </item>
    <item>
      <title>Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666814#M207377</link>
      <description>&lt;P&gt;Hi&amp;nbsp;&lt;a href="https://community.nxp.com/t5/user/viewprofilepage/user-id/207096"&gt;@brian14&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;Yes, the&amp;nbsp;&lt;A href="https://www.tensorflow.org/lite/models/modify/model_maker/object_detection" target="_self"&gt;tflite model maker&lt;/A&gt;&amp;nbsp;is used to train and convert the model to a fully 8bit quantized version.&amp;nbsp;There is no error during training or conversion because i can run the model on the i.MX8M Plus NPU without any problems.&lt;/P&gt;</description>
      <pubDate>Fri, 09 Jun 2023 09:08:55 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1666814#M207377</guid>
      <dc:creator>nxf50230</dc:creator>
      <dc:date>2023-06-09T09:08:55Z</dc:date>
    </item>
    <item>
      <title>Re: i.MX93 EVKCM EthosU NPU Example Error IOCTL failed</title>
      <link>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1672958#M207888</link>
      <description>&lt;P&gt;Hi&lt;BR /&gt;&lt;BR /&gt;Sorry for the delayed reply.&lt;BR /&gt;&lt;BR /&gt;I have been reviewing this case, but I couldn't successfully use the Google Colab tool.&lt;BR /&gt;Do you have any updates on your side about the use of the Efficientdet-lite model on the iMX93?&lt;BR /&gt;Could you please share the model to try on my side?&lt;BR /&gt;&lt;BR /&gt;Have a great day!&lt;BR /&gt;&lt;BR /&gt;Best regards, Brian.&lt;/P&gt;</description>
      <pubDate>Tue, 20 Jun 2023 18:35:05 GMT</pubDate>
      <guid>https://community.nxp.com/t5/i-MX-Processors/i-MX93-EVKCM-EthosU-NPU-Example-Error-IOCTL-failed/m-p/1672958#M207888</guid>
      <dc:creator>brian14</dc:creator>
      <dc:date>2023-06-20T18:35:05Z</dc:date>
    </item>
  </channel>
</rss>

