AWS SDK for C++

AWS SDK for C++ Version 1.11.717

Loading...
Searching...
No Matches
InferenceAcceleratorInfo.h
6#pragma once
7#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
8#include <aws/core/utils/memory/stl/AWSVector.h>
9#include <aws/ec2/EC2_EXPORTS.h>
10#include <aws/ec2/model/InferenceDeviceInfo.h>
11
12#include <utility>
13
14namespace Aws {
15namespace Utils {
16namespace Xml {
17class XmlNode;
18} // namespace Xml
19} // namespace Utils
20namespace EC2 {
21namespace Model {
22
31 public:
32 AWS_EC2_API InferenceAcceleratorInfo() = default;
35
36 AWS_EC2_API void OutputToStream(Aws::OStream& ostream, const char* location, unsigned index, const char* locationValue) const;
37 AWS_EC2_API void OutputToStream(Aws::OStream& oStream, const char* location) const;
38
40
43 inline const Aws::Vector<InferenceDeviceInfo>& GetAccelerators() const { return m_accelerators; }
44 inline bool AcceleratorsHasBeenSet() const { return m_acceleratorsHasBeenSet; }
45 template <typename AcceleratorsT = Aws::Vector<InferenceDeviceInfo>>
46 void SetAccelerators(AcceleratorsT&& value) {
47 m_acceleratorsHasBeenSet = true;
48 m_accelerators = std::forward<AcceleratorsT>(value);
49 }
50 template <typename AcceleratorsT = Aws::Vector<InferenceDeviceInfo>>
52 SetAccelerators(std::forward<AcceleratorsT>(value));
53 return *this;
54 }
55 template <typename AcceleratorsT = InferenceDeviceInfo>
56 InferenceAcceleratorInfo& AddAccelerators(AcceleratorsT&& value) {
57 m_acceleratorsHasBeenSet = true;
58 m_accelerators.emplace_back(std::forward<AcceleratorsT>(value));
59 return *this;
60 }
62
64
68 inline int GetTotalInferenceMemoryInMiB() const { return m_totalInferenceMemoryInMiB; }
69 inline bool TotalInferenceMemoryInMiBHasBeenSet() const { return m_totalInferenceMemoryInMiBHasBeenSet; }
70 inline void SetTotalInferenceMemoryInMiB(int value) {
71 m_totalInferenceMemoryInMiBHasBeenSet = true;
72 m_totalInferenceMemoryInMiB = value;
73 }
76 return *this;
77 }
79 private:
81
82 int m_totalInferenceMemoryInMiB{0};
83 bool m_acceleratorsHasBeenSet = false;
84 bool m_totalInferenceMemoryInMiBHasBeenSet = false;
85};
86
87} // namespace Model
88} // namespace EC2
89} // namespace Aws
Member index (generated):
- `AWS_EC2_API InferenceAcceleratorInfo() = default` — default constructor.
- `AWS_EC2_API InferenceAcceleratorInfo(const Aws::Utils::Xml::XmlNode& xmlNode)` — deserializing constructor.
- `AWS_EC2_API InferenceAcceleratorInfo& operator=(const Aws::Utils::Xml::XmlNode& xmlNode)` — deserializing assignment.
- `AWS_EC2_API void OutputToStream(Aws::OStream& oStream, const char* location) const` — serialize to a stream.
- `AWS_EC2_API void OutputToStream(Aws::OStream& ostream, const char* location, unsigned index, const char* locationValue) const` — serialize a list element to a stream.
- `const Aws::Vector<InferenceDeviceInfo>& GetAccelerators() const` — accessor.
- `InferenceAcceleratorInfo& AddAccelerators(AcceleratorsT&& value)` — append one accelerator (fluent).
- `InferenceAcceleratorInfo& WithAccelerators(AcceleratorsT&& value)` — fluent setter.
- `InferenceAcceleratorInfo& WithTotalInferenceMemoryInMiB(int value)` — fluent setter.
- `Aws::Vector` — alias for `std::vector<T, Aws::Allocator<T>>`.
- `Aws::OStream` — alias for `std::basic_ostream<char, std::char_traits<char>>`.