13
#define USING_LOG_PREFIX PL
15
#include "ob_pl_allocator.h"
16
#include "ob_pl_package_state.h"
23
// Allocate `size` bytes with memory attribute `attr` from the current
// backing allocator (`curr_`). Before delegating, optionally compacts the
// allocator when shrinking is enabled and usage has crossed the adaptive
// threshold.
// NOTE(review): this chunk is garbled — the bare numeric lines below look
// like original-file line numbers from an extraction, and several statements
// (the declarations of `ret`/`ptr`, the closing braces, the return) are not
// visible here. Comments describe only what is visible.
void* ObPLAllocator::alloc(const int64_t size, const ObMemAttr &attr)
27
// Guard: cannot allocate without a backing allocator.
if (OB_ISNULL(curr_)) {
28
ret = OB_ERR_UNEXPECTED;
29
LOG_ERROR("current allocator is null", K(ret), K(curr_));
30
// Opportunistic compaction: only when shrinking is allowed and current
// usage exceeds next_threshold_.
} else if (can_shrink_ && curr_->used() > next_threshold_) {
31
if (OB_FAIL(shrink())) {
32
// Shrink failure is logged; handling after this point is not visible
// in this chunk — presumably allocation still proceeds. TODO confirm.
LOG_WARN("failed to shrink buffer", K(ret), K(next_threshold_), K(threshold_));
36
// Delegate the actual allocation to the current backing allocator.
ptr = curr_->alloc(size, attr);
41
// Reset the allocator to its initial state.
// NOTE(review): most of reset()'s body is missing from this view — only the
// reassignment of backup_ to the second internal allocator is visible. Do
// not assume this single statement is the whole function.
void ObPLAllocator::reset()
46
backup_ = &allocator2_;
49
// Compact memory by deep-copying all live elements from the current
// allocator into the backup allocator (via the subclass hook
// copy_all_element_with_new_allocator), then raising the shrink threshold
// and swapping the current/backup roles.
// NOTE(review): bare numeric lines are extraction artifacts (original file
// line numbers); the tail of the swap logic after `tmp = curr_` is not
// visible in this chunk.
int ObPLAllocator::shrink()
52
if (OB_ISNULL(curr_)) {
53
ret = OB_ERR_UNEXPECTED;
54
LOG_WARN("current allocator is null", K(ret), K(curr_));
55
// Usage dropped back below the threshold since alloc() checked — nothing
// to do. (Branch body not visible here; presumably a no-op.)
} else if (curr_->used() < next_threshold_) {
57
// Move every live element into the backup allocator.
} else if (OB_FAIL(copy_all_element_with_new_allocator(backup_))) {
58
LOG_WARN("failed to copy all element",
59
K(ret), K(next_threshold_), K(curr_->used()), K(backup_->used()));
62
// Adaptive threshold: next shrink triggers only after usage doubles
// relative to the post-shrink footprint, never below the floor threshold_.
next_threshold_ = std::max(2 * backup_->used(), threshold_);
63
LOG_INFO("NOTICE: shrink allocator done!",
66
K(backup_->used()), K(curr_->used()),
67
K(curr_), K(backup_));
69
// Begin swapping current and backup allocators; the remaining swap
// statements are not visible in this chunk.
ObIAllocator *tmp = curr_;
77
// Destruct every element object held by collection `dest`, stopping at the
// first failure (OZ aborts the loop by setting ret on error).
// NOTE(review): the function's prologue/epilogue (ret declaration, return)
// are missing from this view; the bare numeric lines are extraction noise.
int ObPLCollAllocator::free_child_coll(ObPLCollection &dest)
81
for (int64_t i = 0; OB_SUCC(ret) && i < dest.get_count(); ++i) {
82
OZ (ObUserDefinedType::destruct_obj(dest.get_data()[i], NULL));
87
// Deep-copy the owned collection (coll_) into `allocator`, then redirect
// coll_'s internal storage pointers at the freshly copied data and destruct
// the old element objects. Used by ObPLAllocator::shrink() to migrate a
// collection between the paired allocators.
// NOTE(review): bare numeric lines are extraction artifacts (original file
// line numbers); closing braces and some statements between the visible
// lines are missing from this view.
int ObPLCollAllocator::copy_all_element_with_new_allocator(ObIAllocator *allocator)
90
// Collection deep-copy is only supported in Oracle-PL builds.
#ifndef OB_BUILD_ORACLE_PL
92
ret = OB_NOT_SUPPORTED;
93
LOG_WARN("not support", K(ret));
95
if (OB_ISNULL(allocator)) {
96
ret = OB_ERR_UNEXPECTED;
97
LOG_WARN("copy allocator is null", K(ret), K(allocator));
98
} else if (OB_ISNULL(coll_)) {
99
ret = OB_ERR_UNEXPECTED;
100
LOG_WARN("collection is null", K(ret), K(coll_));
103
// Per-type copy recipe: raw-allocate the concrete collection class from
// the target allocator, placement-new it with the source id, deep-copy the
// source into it, and on deep-copy failure free the partially copied
// children to avoid leaking into the target allocator.
#define DEEP_COPY_COLLECTION(type, class) \
105
if (OB_ISNULL(dest = reinterpret_cast<class*>(allocator->alloc(sizeof(class))))) { \
106
ret = OB_ALLOCATE_MEMORY_FAILED; \
107
LOG_WARN("failed to alloc memory for collection", K(ret), K(allocator), KPC(coll_), KPC(dest)); \
108
// FALSE_IT: run the placement-new for its side effect, never take branch.
} else if (FALSE_IT(dest = new (dest) class(coll_->get_id()))) { \
109
} else if (OB_FAIL((reinterpret_cast<class*>(dest))->deep_copy(coll_, allocator))) { \
110
LOG_WARN("failed to deep copy", K(ret), K(allocator), KPC(coll_), KPC(dest)); \
111
// Best-effort cleanup: log but do not overwrite the deep-copy error.
int tmp = ObPLCollAllocator::free_child_coll(reinterpret_cast<ObPLCollection&>(*dest)); \
112
if (OB_SUCCESS != tmp) { \
113
LOG_WARN("failed to free child memory", K(tmp)); \
118
ObPLCollection* dest = NULL;
119
// Dispatch on the concrete collection kind.
switch (coll_->get_type()) {
120
DEEP_COPY_COLLECTION(PL_NESTED_TABLE_TYPE, ObPLNestedTable);
121
DEEP_COPY_COLLECTION(PL_ASSOCIATIVE_ARRAY_TYPE, ObPLAssocArray);
122
DEEP_COPY_COLLECTION(PL_VARRAY_TYPE, ObPLVArray);
124
ret = OB_ERR_UNEXPECTED;
125
LOG_WARN("unknow collection type", K(ret), K(coll_->get_type()), KPC(coll_));
129
#undef DEEP_COPY_COLLECTION
132
// The copy is now owned by this allocator wrapper.
dest->set_allocator(this);
133
// Tear down the original element objects; their payload now lives in the
// deep-copied `dest`.
for (int64_t i = 0; OB_SUCC(ret) && i < coll_->get_count(); ++i) {
134
if (OB_FAIL(ObUserDefinedType::destruct_obj(coll_->get_data()[i], NULL))) {
135
LOG_WARN("failed to destruct collection", K(ret), K(coll_->get_data()[i]), K(i));
139
// Repoint the original collection header at the migrated storage.
coll_->set_allocator(dest->get_allocator());
140
coll_->set_data(dest->get_data());
141
// Associative arrays additionally carry key and sort arrays that must be
// repointed at the copies.
if (PL_ASSOCIATIVE_ARRAY_TYPE == coll_->get_type()) {
142
ObPLAssocArray* src = static_cast<ObPLAssocArray*>(coll_);
143
ObPLAssocArray* dst = static_cast<ObPLAssocArray*>(dest);
144
if (OB_ISNULL(src) || OB_ISNULL(dst)) {
145
ret = OB_ERR_UNEXPECTED;
146
LOG_WARN("unexpected associative array pointer", K(ret), KPC(coll_), KPC(dst));
148
src->set_key(dst->get_key());
149
src->set_sort(dst->get_sort());
154
LOG_INFO("copy all element with new allocator in collection", K(ret), KPC(coll_), KPC(dest), K(lbt()));
160
// Deep-copy every PL symbol (the execution context's parameter store and
// its result object) into `allocator`, rewriting each ObObj in place via
// apply(). Used during allocator shrink to migrate symbol memory.
// NOTE(review): bare numeric lines are extraction artifacts; declarations
// of `dst`/`dst_result` and closing braces are not visible in this chunk.
int ObPLSymbolAllocator::copy_all_element_with_new_allocator(ObIAllocator *allocator)
162
int ret = OB_SUCCESS;
163
if (OB_ISNULL(allocator)) {
164
ret = OB_ERR_UNEXPECTED;
165
LOG_WARN("copy allocator is null", K(ret), K(allocator));
166
} else if (OB_ISNULL(pl_)) {
167
ret = OB_ERR_UNEXPECTED;
168
// NOTE(review): this branch sets ret without a LOG_WARN — the log line may
// have been lost in extraction; verify against the original file.
} else if (OB_ISNULL(pl_->params_)) {
169
ret = OB_ERR_UNEXPECTED;
170
LOG_WARN("pl symbols is null", K(ret), K(pl_->params_));
171
} else if (OB_ISNULL(pl_->result_)) {
172
ret = OB_ERR_UNEXPECTED;
173
LOG_WARN("pl result is null", K(ret), K(pl_->result_));
175
ParamStore *params = pl_->params_;
176
// Migrate each parameter: deep-copy into the new allocator, then swap the
// copy's payload into the original slot and refresh its param meta.
for (int64_t i = 0; OB_SUCC(ret) && i < params->count(); ++i) {
178
ObObj *src = &(params->at(i));
179
OZ (deep_copy_obj(*allocator, *src, dst));
180
CK (params->at(i).apply(dst));
181
OX (params->at(i).set_param_meta());
184
// Migrate the result object the same way.
OZ (deep_copy_obj(*allocator, *(pl_->result_), dst_result));
185
OX ((*pl_->result_).apply(dst_result));
190
// Deep-copy every package-state variable into `allocator` during shrink.
// PL-extend values (except cursor types) use the UDT deep-copy/destruct
// pair so nested structures are migrated and the originals released;
// plain ObObj values use a shallow deep_copy_obj.
// NOTE(review): bare numeric lines are extraction artifacts; the `dst`
// declaration, else-branch brace, and the function tail run past the end
// of this view.
int ObPLPkgAllocator::copy_all_element_with_new_allocator(ObIAllocator *allocator)
192
int ret = OB_SUCCESS;
193
LOG_INFO("copy all element with new allocator in package state");
194
CK (OB_NOT_NULL(allocator));
195
CK (OB_NOT_NULL(state_));
197
ObIArray<ObObj> &vars = state_->get_vars();
198
for (int64_t i = 0; OB_SUCC(ret) && i < vars.count(); ++i) {
200
// Cursor and ref-cursor extend values are deliberately excluded from the
// UDT deep-copy path.
if (vars.at(i).is_pl_extend()
201
&& vars.at(i).get_meta().get_extend_type() != PL_CURSOR_TYPE
202
&& vars.at(i).get_meta().get_extend_type() != PL_REF_CURSOR_TYPE) {
203
OZ (pl::ObUserDefinedType::deep_copy_obj(*allocator, vars.at(i), dst, true));
204
// Release the original nested object after a successful copy.
OZ (pl::ObUserDefinedType::destruct_obj(vars.at(i), nullptr));
206
OZ (deep_copy_obj(*allocator, vars.at(i), dst));
208
// Install the migrated value back into the package-state slot.
OX (vars.at(i) = dst);