Remove use of PostAllocationConfigure from ExecuteAsync calls

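PostAllocationConfigure() builds decoders and caches tensor shapes as
workload member state. For async execution the tensor handles live in
the caller-owned WorkingMemDescriptor rather than in the workload, so
re-running that configuration from ExecuteAsync is redundant work on
every call and a data race when several threads execute the same
network with separate working memory. Shapes are now captured once
from WorkloadInfo at construction, and decoders are created locally
inside Execute.

For context, a minimal sketch of the async path this protects,
assuming the experimental IRuntime working-memory API
(CreateWorkingMemHandle and the Execute overload taking an
IWorkingMemHandle) and given a loaded network id netId with bound
input/output tensors; the names are illustrative only:

    // Each thread owns its working memory; workloads must not cache
    // state bound to one particular set of tensor handles.
    auto wm0 = runtime->CreateWorkingMemHandle(netId);
    auto wm1 = runtime->CreateWorkingMemHandle(netId);

    std::thread t0([&] { runtime->Execute(*wm0, inputs0, outputs0); });
    std::thread t1([&] { runtime->Execute(*wm1, inputs1, outputs1); });
    t0.join();
    t1.join();
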
 * Resolves: IVGCVSW-6952

Signed-off-by: Finn Williams <finn.williams@arm.com>
Change-Id: Ic85bd5267cf94e0ee8461ff4e62b9db3cb80877a
diff --git a/include/armnn/backends/IWorkload.hpp b/include/armnn/backends/IWorkload.hpp
index 20be016..c7bc5da 100644
--- a/include/armnn/backends/IWorkload.hpp
+++ b/include/armnn/backends/IWorkload.hpp
@@ -23,6 +23,8 @@
 public:
     virtual ~IWorkload() {}
 
+    // Note: do not call this on async networks, via ExecuteAsync or otherwise,
+    // as an async network's memory is allocated outside the workload.
     virtual void PostAllocationConfigure() = 0;
 
     virtual void Execute() const = 0;
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index fe97cb1..3ddbdce 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -15,6 +15,9 @@
 RefConvolution2dWorkload::RefConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                                                    const WorkloadInfo& info)
     : RefBaseWorkload<Convolution2dQueueDescriptor>(descriptor, info)
+    , m_InputShape(info.m_InputTensorInfos[0].GetShape())
+    , m_FilterShape(info.m_InputTensorInfos[1].GetShape())
+    , m_OutputShape(info.m_OutputTensorInfos[0].GetShape())
 {
     WorkloadInfo detailsInfo;
     detailsInfo.m_InputTensorInfos = info.m_InputTensorInfos;
@@ -27,33 +30,6 @@
                                          this->GetGuid());
 }
 
-void RefConvolution2dWorkload::PostAllocationConfigure()
-{
-    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefConvolution2dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
-                                                        std::vector<ITensorHandle*> outputs)
-{
-    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
-    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
-    m_InputShape = inputInfo.GetShape();
-
-    const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]);
-    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
-    m_FilterShape = rFilterInfo.GetShape();
-    m_FilterDecoder = MakeDecoder<float>(rFilterInfo);
-
-    if (m_Data.m_Parameters.m_BiasEnabled)
-    {
-        const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
-        m_BiasDecoder = MakeDecoder<float>(biasInfo);
-    }
-
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
-    m_OutputShape = outputInfo.GetShape();
-}
-
 void RefConvolution2dWorkload::Execute() const
 {
     Execute(m_Data.m_Inputs, m_Data.m_Outputs);
@@ -61,8 +37,6 @@
 
 void RefConvolution2dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
 {
-    PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
-
     Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
 }
 
@@ -73,14 +47,18 @@
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
 
-    m_FilterDecoder->Reset(inputs[1]->Map());
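+    // Create the decoders for each call so that concurrent ExecuteAsync
+    // invocations with separate working memory do not share mutable state.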
+    std::unique_ptr<Decoder<float>> weightsDecoder = MakeDecoder<float>(GetTensorInfo(inputs[1]), inputs[1]->Map());
+    std::unique_ptr<Decoder<float>> biasDecoder;
+
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        m_BiasDecoder->Reset(inputs[2]->Map());
+        biasDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]), inputs[2]->Map());
     }
 
     Convolve(m_InputShape, *inputDecoder, m_OutputShape, *outputEncoder, m_FilterShape,
-             *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
+             *weightsDecoder, m_Data.m_Parameters.m_BiasEnabled, biasDecoder.get(),
              m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft,
              m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY,
              m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY);
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
index 1cb30b6..f0d7037 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.hpp
@@ -19,21 +19,16 @@
     explicit RefConvolution2dWorkload(const Convolution2dQueueDescriptor& descriptor,
                                       const WorkloadInfo& info);
 
-    void PostAllocationConfigure() override;
-
     void Execute() const override;
     void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)  override;
 
 private:
-    void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
 
-    std::unique_ptr<Decoder<float>> m_FilterDecoder;
-    std::unique_ptr<Decoder<float>> m_BiasDecoder;
-
-    TensorShape m_InputShape;
-    TensorShape m_OutputShape;
-    TensorShape m_FilterShape;
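+    // Shapes are fixed at load time; decoders are created per call in Execute().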
+    const TensorShape m_InputShape;
+    const TensorShape m_FilterShape;
+    const TensorShape m_OutputShape;
 };
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
index 5f54280..f6a0ee2 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.cpp
@@ -32,26 +32,6 @@
                                          this->GetGuid());
 }
 
-void RefConvolution3dWorkload::PostAllocationConfigure()
-{
-    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefConvolution3dWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
-                                                       std::vector<ITensorHandle*> outputs)
-{
-    IgnoreUnused(outputs);
-    const TensorInfo& rFilterInfo = GetTensorInfo(inputs[1]);
-    m_FilterShape = rFilterInfo.GetShape();
-    m_FilterDecoder = MakeDecoder<float>(rFilterInfo);
-
-    if (m_Data.m_Parameters.m_BiasEnabled)
-    {
-        const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
-        m_BiasDecoder = MakeDecoder<float>(biasInfo);
-    }
-}
-
 void RefConvolution3dWorkload::Execute() const
 {
     Execute(m_Data.m_Inputs, m_Data.m_Outputs);
@@ -59,8 +39,6 @@
 
 void RefConvolution3dWorkload::ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)
 {
-    PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
-
     Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
 }
 
@@ -74,14 +52,17 @@
     const TensorShape& inputShape = GetTensorInfo(inputs[0]).GetShape();
     const TensorShape& outputShape = GetTensorInfo(outputs[0]).GetShape();
 
-    m_FilterDecoder->Reset(inputs[1]->Map());
+    const auto& filterInfo = GetTensorInfo(inputs[1]);
+    std::unique_ptr<Decoder<float>> filterDecoder = MakeDecoder<float>(filterInfo, inputs[1]->Map());
+    std::unique_ptr<Decoder<float>> biasDecoder;
+
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        m_BiasDecoder->Reset(inputs[2]->Map());
+        biasDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]), inputs[2]->Map());
     }
 
-    Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, m_FilterShape,
-               *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
+    Convolve3d(inputShape, *inputDecoder, outputShape, *outputEncoder, filterInfo.GetShape(),
+               *filterDecoder, m_Data.m_Parameters.m_BiasEnabled, biasDecoder.get(),
                m_Data.m_Parameters.m_DataLayout,
                m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, m_Data.m_Parameters.m_PadFront,
                m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, m_Data.m_Parameters.m_StrideZ,
diff --git a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
index 6c74675..b53f3a5 100644
--- a/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
+++ b/src/backends/reference/workloads/RefConvolution3dWorkload.hpp
@@ -19,19 +19,11 @@
     explicit RefConvolution3dWorkload(const Convolution3dQueueDescriptor& descriptor,
                                       const WorkloadInfo& info);
 
-    void PostAllocationConfigure() override;
-
     void Execute() const override;
     void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)  override;
 
 private:
-    void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-
-    std::unique_ptr<Decoder<float>> m_FilterDecoder;
-    std::unique_ptr<Decoder<float>> m_BiasDecoder;
-
-    TensorShape m_FilterShape;
 };
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index c6ea147..087fc9d 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -12,45 +12,28 @@
 
 namespace armnn
 {
+
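+// Returns the number of activations per batch entry: the product of all input dimensions after the batch dimension.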
+unsigned int GetNumActivations(const TensorInfo& inputInfo)
+{
+    unsigned int numActivations = 1; // Total number of activations in the input.
+    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
+    {
+        numActivations *= inputInfo.GetShape()[i];
+    }
+    return numActivations;
+}
+
 RefFullyConnectedWorkload::RefFullyConnectedWorkload(
     const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info)
         : RefBaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
+        , m_InputShape(info.m_InputTensorInfos[0].GetShape())
+        , m_WeightShape(info.m_InputTensorInfos[1].GetShape())
+        , m_OutputShape(info.m_OutputTensorInfos[0].GetShape())
+        , m_NumActivations(GetNumActivations(info.m_InputTensorInfos[0]))
 {
 }
 
-void RefFullyConnectedWorkload::PostAllocationConfigure()
-{
-    PostAllocationConfigure(m_Data.m_Inputs, m_Data.m_Outputs);
-}
-
-void RefFullyConnectedWorkload::PostAllocationConfigure(std::vector<ITensorHandle*> inputs,
-                                                        std::vector<ITensorHandle*> outputs)
-{
-    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
-    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
-    m_InputShape = inputInfo.GetShape();
-
-    const TensorInfo& rWeightInfo = GetTensorInfo(inputs[1]);
-    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
-    m_WeightShape = rWeightInfo.GetShape();
-    m_WeightDecoder = MakeDecoder<float>(rWeightInfo);
-
-    if (m_Data.m_Parameters.m_BiasEnabled)
-    {
-        const TensorInfo& biasInfo = GetTensorInfo(inputs[2]);
-        m_BiasDecoder = MakeDecoder<float>(biasInfo);
-    }
-
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
-    m_OutputShape = outputInfo.GetShape();
-
-    m_NumActivations = 1; // Total number of activations in the input.
-    for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
-    {
-        m_NumActivations *= inputInfo.GetShape()[i];
-    }
-}
-
 void RefFullyConnectedWorkload::Execute() const
 {
     Execute(m_Data.m_Inputs, m_Data.m_Outputs);
@@ -58,8 +41,6 @@
 
 void RefFullyConnectedWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
 {
-    PostAllocationConfigure(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
-
     Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
 }
 
@@ -70,10 +51,12 @@
     std::unique_ptr<Decoder<float>> inputDecoder = MakeDecoder<float>(GetTensorInfo(inputs[0]), inputs[0]->Map());
     std::unique_ptr<Encoder<float>> OutputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]), outputs[0]->Map());
 
-    m_WeightDecoder->Reset(inputs[1]->Map());
+    std::unique_ptr<Decoder<float>> weightsDecoder = MakeDecoder<float>(GetTensorInfo(inputs[1]), inputs[1]->Map());
+    std::unique_ptr<Decoder<float>> biasDecoder;
+
     if (m_Data.m_Parameters.m_BiasEnabled)
     {
-        m_BiasDecoder->Reset(inputs[2]->Map());
+        biasDecoder = MakeDecoder<float>(GetTensorInfo(inputs[2]), inputs[2]->Map());
     }
 
     FullyConnected(m_InputShape,
@@ -81,8 +64,8 @@
                    m_OutputShape,
                    *OutputEncoder,
                    m_WeightShape,
-                   *m_WeightDecoder,
-                   m_BiasDecoder.get(),
+                   *weightsDecoder,
+                   biasDecoder.get(),
                    m_Data.m_Parameters.m_BiasEnabled,
                    m_NumActivations,
                    m_Data.m_Parameters.m_TransposeWeightMatrix);
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
index 432a887..3bdfb86 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.hpp
@@ -21,24 +21,16 @@
     explicit RefFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
                                        const WorkloadInfo& info);
 
-    void PostAllocationConfigure() override;
-
     void Execute() const override;
     void ExecuteAsync(WorkingMemDescriptor& workingMemDescriptor)  override;
 
 private:
-    void PostAllocationConfigure(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs);
     void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
-    std::unique_ptr<ScopedTensorHandle> m_Weight;
-    std::unique_ptr<ScopedTensorHandle> m_Bias;
 
-    std::unique_ptr<Decoder<float>> m_WeightDecoder;
-    std::unique_ptr<Decoder<float>> m_BiasDecoder;
-
-    TensorShape m_InputShape;
-    TensorShape m_OutputShape;
-    TensorShape m_WeightShape;
-    unsigned int m_NumActivations;
+    const TensorShape m_InputShape;
+    const TensorShape m_WeightShape;
+    const TensorShape m_OutputShape;
+    const unsigned int m_NumActivations;
 };
 
 } //namespace armnn