IVGCVSW-3307 Don't assume ITensorHandle::Map() can be called before Execute()

Change-Id: I445c69d2e99d8c93622e739af61f721e61b0f90f
Signed-off-by: Matthew Bentham <Matthew.Bentham@arm.com>
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 26b0179..5583fe7 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -8,6 +8,8 @@
 #include <armnn/ArmNN.hpp>
 #include <ResolveType.hpp>
 
+#include <boost/assert.hpp>
+
 namespace armnn
 {
 
@@ -35,6 +37,8 @@
 
     virtual ~Decoder() {}
 
+    virtual void Reset(void*) = 0;
+
     virtual IType Get() const = 0;
 };
 
@@ -46,6 +50,8 @@
 
     virtual ~Encoder() {}
 
+    virtual void Reset(void*) = 0;
+
     virtual void Set(IType right) = 0;
 
     virtual IType Get() const = 0;
@@ -55,30 +61,40 @@
 class TypedIterator : public Base
 {
 public:
-    TypedIterator(T* data)
+    TypedIterator(T* data = nullptr)
         : m_Iterator(data), m_Start(data)
     {}
 
+    void Reset(void* data) override
+    {
+        m_Iterator = reinterpret_cast<T*>(data);
+        m_Start = m_Iterator;
+    }
+
     TypedIterator& operator++() override
     {
+        BOOST_ASSERT(m_Iterator);
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -94,6 +110,9 @@
     QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QASymm8Decoder(const float scale, const int32_t offset)
+        : QASymm8Decoder(nullptr, scale, offset) {}
+
     float Get() const override
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
@@ -110,6 +129,9 @@
     QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QSymm16Decoder(const float scale, const int32_t offset)
+        : QSymm16Decoder(nullptr, scale, offset) {}
+
     float Get() const override
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
@@ -126,6 +148,9 @@
     FloatDecoder(const float* data)
         : TypedIterator(data) {}
 
+    FloatDecoder()
+        : FloatDecoder(nullptr) {}
+
     float Get() const override
     {
         return *m_Iterator;
@@ -138,6 +163,9 @@
     ScaledInt32Decoder(const int32_t* data, const float scale)
         : TypedIterator(data), m_Scale(scale) {}
 
+    ScaledInt32Decoder(const float scale)
+        : ScaledInt32Decoder(nullptr, scale) {}
+
     float Get() const override
     {
         return static_cast<float>(*m_Iterator) * m_Scale;
@@ -153,6 +181,9 @@
     QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QASymm8Encoder(const float scale, const int32_t offset)
+        : QASymm8Encoder(nullptr, scale, offset) {}
+
     void Set(float right) override
     {
         *m_Iterator = armnn::Quantize<uint8_t>(right, m_Scale, m_Offset);
@@ -174,6 +205,9 @@
     QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QSymm16Encoder(const float scale, const int32_t offset)
+        : QSymm16Encoder(nullptr, scale, offset) {}
+
     void Set(float right) override
     {
         *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale, m_Offset);
@@ -195,6 +229,9 @@
     FloatEncoder(float* data)
         : TypedIterator(data) {}
 
+    FloatEncoder()
+        : FloatEncoder(nullptr) {}
+
     void Set(float right) override
     {
         *m_Iterator = right;
@@ -212,6 +249,9 @@
     BooleanEncoder(uint8_t* data)
         : TypedIterator(data) {}
 
+    BooleanEncoder()
+        : BooleanEncoder(nullptr) {}
+
     void Set(bool right) override
     {
         *m_Iterator = right;
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index f5ec906..793e550 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -13,7 +13,7 @@
 {
 
 template<typename T>
-inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data);
+inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);
 
 template<>
 inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index af3b937..ed92393 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -13,7 +13,7 @@
 {
 
 template<typename T>
-inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data);
+inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data = nullptr);
 
 template<>
 inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data)
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index a660d2e..dad9936 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -34,16 +34,19 @@
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder<float>(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder<float>(outputInfo);
 }
 
 void RefConvolution2dWorkload::Execute() const {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvolution2dWorkload_Execute");
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     Convolve(m_InputShape, *m_InputDecoder, m_OutputShape, *m_OutputEncoder, m_FilterShape,
              *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
              m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft,
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index 48a20cf..cfc81ce 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -36,11 +36,11 @@
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder<float>(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder<float>(outputInfo);
 }
 
 void RefDepthwiseConvolution2dWorkload::Execute() const
@@ -48,6 +48,9 @@
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dWorkload_Execute");
     std::unique_ptr<Decoder<float>> pBiasDecoder{};
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     Convolve(m_InputShape, *m_InputDecoder, m_OutputShape, *m_OutputEncoder,
              m_FilterShape, *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
              m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft,
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index cbacd9c..6431348 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -32,9 +32,9 @@
     const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    m_Input0 = MakeDecoder<InType>(inputInfo0, m_Data.m_Inputs[0]->Map());
-    m_Input1 = MakeDecoder<InType>(inputInfo1, m_Data.m_Inputs[1]->Map());
-    m_Output = MakeEncoder<OutType>(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_Input0 = MakeDecoder<InType>(inputInfo0);
+    m_Input1 = MakeDecoder<InType>(inputInfo1);
+    m_Output = MakeEncoder<OutType>(outputInfo);
 }
 
 template <typename Functor, typename ParentDescriptor, typename armnn::StringMapping::Id DebugString>
@@ -49,6 +49,10 @@
     const TensorShape& inShape1 = inputInfo1.GetShape();
     const TensorShape& outShape = outputInfo.GetShape();
 
+    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
+    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
+    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
     ElementwiseFunction<Functor>(inShape0,
                                  inShape1,
                                  outShape,
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index c7a3d90..ac82db9 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -34,11 +34,11 @@
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder<float>(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder<float>(outputInfo);
 
     m_NumActivations = 1; // Total number of activations in the input.
     for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
@@ -51,6 +51,9 @@
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     FullyConnected(m_InputShape,
                    *m_InputDecoder,
                    m_OutputShape,