// Excerpt from the TextInferenceConfig class body (source-browser line numbers stripped).
#include <aws/bedrock/Bedrock_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>

// Each setter follows the SDK's value-plus-flag pattern: assigning a field
// also flips the matching m_*HasBeenSet flag, so Jsonize() serializes only
// the fields the caller explicitly set.
inline void SetTemperature(double value) { m_temperatureHasBeenSet = true; m_temperature = value; }

inline double GetTopP() const { return m_topP; }
inline void SetTopP(double value) { m_topPHasBeenSet = true; m_topP = value; }

inline void SetMaxTokens(int value) { m_maxTokensHasBeenSet = true; m_maxTokens = value; }

// Perfect-forwarding setters: an lvalue argument is copied in, an rvalue moved in.
template <typename StopSequencesT = Aws::Vector<Aws::String>>
void SetStopSequences(StopSequencesT&& value) {
  m_stopSequencesHasBeenSet = true;
  m_stopSequences = std::forward<StopSequencesT>(value);
}

template <typename StopSequencesT = Aws::Vector<Aws::String>>
TextInferenceConfig& WithStopSequences(StopSequencesT&& value) {
  SetStopSequences(std::forward<StopSequencesT>(value));
  return *this;
}

// Appends a single stop sequence; StopSequencesT defaults to Aws::String.
template <typename StopSequencesT = Aws::String>
TextInferenceConfig& AddStopSequences(StopSequencesT&& value) {
  m_stopSequencesHasBeenSet = true;
  m_stopSequences.emplace_back(std::forward<StopSequencesT>(value));
  return *this;
}

// Private data members. The m_topP, m_maxTokens, and m_stopSequences
// declarations are reconstructed from the accessors above.
double m_temperature{0.0};
double m_topP{0.0};
int m_maxTokens{0};
Aws::Vector<Aws::String> m_stopSequences;

bool m_temperatureHasBeenSet = false;
bool m_topPHasBeenSet = false;
bool m_maxTokensHasBeenSet = false;
bool m_stopSequencesHasBeenSet = false;
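The perfect-forwarding stop-sequence setters above accept either an lvalue (copied in) or an rvalue (moved in), and AddStopSequences appends a single element. A minimal usage sketch, assuming the header lives at aws/bedrock/model/TextInferenceConfig.h as the Bedrock_EXPORTS.h include suggests; the sample stop strings are illustrative only:

#include <aws/bedrock/model/TextInferenceConfig.h>  // assumed header path
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>

int main() {
  Aws::Bedrock::Model::TextInferenceConfig config;

  // Lvalue argument: StopSequencesT deduces to Aws::Vector<Aws::String>&,
  // so std::forward copies the vector into m_stopSequences.
  Aws::Vector<Aws::String> stops = {"\n\nHuman:"};
  config.SetStopSequences(stops);

  // Rvalue argument: StopSequencesT deduces to Aws::Vector<Aws::String>,
  // so the vector is moved rather than copied.
  config.SetStopSequences(Aws::Vector<Aws::String>{"END"});

  // String literal: deduced as a reference to a char array, which
  // emplace_back converts to an Aws::String at the back of the list.
  config.AddStopSequences("STOP");
  return 0;
}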
// Member-function summary for Aws::Bedrock::Model::TextInferenceConfig:
AWS_BEDROCK_API TextInferenceConfig() = default;
AWS_BEDROCK_API TextInferenceConfig(Aws::Utils::Json::JsonView jsonValue);
AWS_BEDROCK_API TextInferenceConfig& operator=(Aws::Utils::Json::JsonView jsonValue);
AWS_BEDROCK_API Aws::Utils::Json::JsonValue Jsonize() const;

double GetTemperature() const;
bool TemperatureHasBeenSet() const;
void SetTemperature(double value);
TextInferenceConfig& WithTemperature(double value);

bool TopPHasBeenSet() const;
void SetTopP(double value);
TextInferenceConfig& WithTopP(double value);

bool MaxTokensHasBeenSet() const;
void SetMaxTokens(int value);
TextInferenceConfig& WithMaxTokens(int value);

const Aws::Vector<Aws::String>& GetStopSequences() const;
bool StopSequencesHasBeenSet() const;
void SetStopSequences(StopSequencesT&& value);
TextInferenceConfig& WithStopSequences(StopSequencesT&& value);
TextInferenceConfig& AddStopSequences(StopSequencesT&& value);

// Related Aws utility declarations referenced above:
template <typename T, typename... ArgTypes>
std::shared_ptr<T> MakeShared(const char* allocationTag, ArgTypes&&... args);

template <typename T>
using Vector = std::vector<T, Aws::Allocator<T>>;

using JsonValue = Aws::Utils::Json::JsonValue;
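Putting the interface together, a hedged end-to-end sketch: populate the config through the fluent With*/Add* chain, then serialize with Jsonize(). Because every setter flips its HasBeenSet flag, only the fields assigned below appear in the emitted JSON. The header path and parameter values are illustrative assumptions, and SDK bootstrap (Aws::SDKOptions / Aws::InitAPI) is omitted since only the model object is exercised:

#include <aws/bedrock/model/TextInferenceConfig.h>  // assumed header path
#include <aws/core/utils/json/JsonSerializer.h>
#include <iostream>

int main() {
  using Aws::Bedrock::Model::TextInferenceConfig;

  // Fluent construction: each With*/Add* setter returns *this, so calls chain.
  TextInferenceConfig config = TextInferenceConfig()
                                   .WithTemperature(0.7)
                                   .WithTopP(0.9)
                                   .WithMaxTokens(512)
                                   .AddStopSequences("\n\nHuman:");

  // Jsonize() serializes only the fields whose HasBeenSet flag is true.
  Aws::Utils::Json::JsonValue json = config.Jsonize();
  std::cout << json.View().WriteReadable() << std::endl;

  // The JsonView constructor rebuilds an equivalent config from that JSON.
  TextInferenceConfig restored(json.View());
  return 0;
}

The round trip through JsonView is what the JsonView constructor and operator= declared above exist for: request serialization on the way out, response deserialization on the way back.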