project
string | commit_id
string | target
int64 | func
string | cwe
string | big_vul_idx
string | idx
int64 | hash
string | size
float64 | message
string | dataset
string |
|---|---|---|---|---|---|---|---|---|---|---|
Chrome
|
fb83de09f2c986ee91741f3a2776feea0e18e3f6
| 1
|
void OverlayWindowViews::OnGestureEvent(ui::GestureEvent* event) {
if (event->type() != ui::ET_GESTURE_TAP)
return;
hide_controls_timer_.Reset();
if (!GetControlsScrimLayer()->visible()) {
UpdateControlsVisibility(true);
return;
}
if (GetCloseControlsBounds().Contains(event->location())) {
controller_->Close(true /* should_pause_video */,
true /* should_reset_pip_player */);
event->SetHandled();
} else if (GetPlayPauseControlsBounds().Contains(event->location())) {
TogglePlayPause();
event->SetHandled();
}
views::Widget::OnGestureEvent(event);
}
|
186551
| 7,290
|
303506592678362065050884877503185093458
| null | null | null |
|
Chrome
|
610f904d8215075c4681be4eb413f4348860bf9f
| 0
|
CallbackList& callbacks() { return callbacks_; }
|
101065
| 90,816
|
6378649268612680894492270454042834907
| null | null | null |
|
tensorflow
|
92dba16749fae36c246bec3f9ba474d9ddeb7662
| 1
|
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
// The output values of this node may be needed.
return false;
}
if (node.input_size() < 1) {
// Node lacks input, is invalid
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
CHECK(input != nullptr) << "node = " << node.name()
<< " input = " << node.input(0);
// Don't remove Identity nodes corresponding to Variable reads or following
// Recv.
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
| null | null | 195,059
|
280470408197015060590448712190364740247
| 40
|
Prevent a null-pointer dereference / `CHECK`-fail in grappler.
PiperOrigin-RevId: 409187354
Change-Id: I369c249cca32e6c56ec193f0ebbf2f2768fc7d43
|
other
|
tensorflow
|
8a513cec4bec15961fbfdedcaa5376522980455c
| 1
|
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs,
const OpDef& op_def) {
FullTypeDef ft;
ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < op_def.output_arg_size(); i++) {
auto* t = ft.add_args();
*t = op_def.output_arg(i).experimental_full_type();
// Resolve dependent types. The convention for op registrations is to use
// attributes as type variables.
// See https://www.tensorflow.org/guide/create_op#type_polymorphism.
// Once the op signature can be defined entirely in FullType, this
// convention can be deprecated.
//
// Note: While this code performs some basic verifications, it generally
// assumes consistent op defs and attributes. If more complete
// verifications are needed, they should be done by separately, and in a
// way that can be reused for type inference.
for (int j = 0; j < t->args_size(); j++) {
auto* arg = t->mutable_args(i);
if (arg->type_id() == TFT_VAR) {
const auto* attr = attrs.Find(arg->s());
DCHECK(attr != nullptr);
if (attr->value_case() == AttrValue::kList) {
const auto& attr_list = attr->list();
arg->set_type_id(TFT_PRODUCT);
for (int i = 0; i < attr_list.type_size(); i++) {
map_dtype_to_tensor(attr_list.type(i), arg->add_args());
}
} else if (attr->value_case() == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), arg);
} else {
return Status(error::UNIMPLEMENTED,
absl::StrCat("unknown attribute type",
attrs.DebugString(), " key=", arg->s()));
}
arg->clear_s();
}
}
}
return ft;
}
| null | null | 195,067
|
127871006948263872569838116397374697164
| 48
|
Prevent null dereference read in `SpecializeType()`
For some adversarial protos, the attribute for a key might not exist.
PiperOrigin-RevId: 408382090
Change-Id: Ie7eabe532c9ff280fce5dce1f6cdb93c76c2e040
|
other
|
gpac
|
a69b567b8c95c72f9560c873c5ab348be058f340
| 1
|
GF_AV1Config *gf_odf_av1_cfg_read_bs_size(GF_BitStream *bs, u32 size)
{
#ifndef GPAC_DISABLE_AV_PARSERS
AV1State state;
u8 reserved;
GF_AV1Config *cfg;
if (!size) size = (u32) gf_bs_available(bs);
if (!size) return NULL;
cfg = gf_odf_av1_cfg_new();
gf_av1_init_state(&state);
state.config = cfg;
cfg->marker = gf_bs_read_int(bs, 1);
cfg->version = gf_bs_read_int(bs, 7);
cfg->seq_profile = gf_bs_read_int(bs, 3);
cfg->seq_level_idx_0 = gf_bs_read_int(bs, 5);
cfg->seq_tier_0 = gf_bs_read_int(bs, 1);
cfg->high_bitdepth = gf_bs_read_int(bs, 1);
cfg->twelve_bit = gf_bs_read_int(bs, 1);
cfg->monochrome = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_x = gf_bs_read_int(bs, 1);
cfg->chroma_subsampling_y = gf_bs_read_int(bs, 1);
cfg->chroma_sample_position = gf_bs_read_int(bs, 2);
reserved = gf_bs_read_int(bs, 3);
if (reserved != 0 || cfg->marker != 1 || cfg->version != 1) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] wrong avcC reserved %d / marker %d / version %d expecting 0 1 1\n", reserved, cfg->marker, cfg->version));
gf_odf_av1_cfg_del(cfg);
return NULL;
}
cfg->initial_presentation_delay_present = gf_bs_read_int(bs, 1);
if (cfg->initial_presentation_delay_present) {
cfg->initial_presentation_delay_minus_one = gf_bs_read_int(bs, 4);
} else {
/*reserved = */gf_bs_read_int(bs, 4);
cfg->initial_presentation_delay_minus_one = 0;
}
size -= 4;
while (size) {
u64 pos, obu_size;
ObuType obu_type;
GF_AV1_OBUArrayEntry *a;
pos = gf_bs_get_position(bs);
obu_size = 0;
if (gf_av1_parse_obu(bs, &obu_type, &obu_size, NULL, &state) != GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] could not parse AV1 OBU at position "LLU". Leaving parsing.\n", pos));
break;
}
assert(obu_size == gf_bs_get_position(bs) - pos);
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] parsed AV1 OBU type=%u size="LLU" at position "LLU".\n", obu_type, obu_size, pos));
if (!av1_is_obu_header(obu_type)) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] AV1 unexpected OBU type=%u size="LLU" found at position "LLU". Forwarding.\n", pos));
}
GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry);
if (!a) break;
a->obu = gf_malloc((size_t)obu_size);
if (!a->obu) {
gf_free(a);
break;
}
gf_bs_seek(bs, pos);
gf_bs_read_data(bs, (char *) a->obu, (u32)obu_size);
a->obu_length = obu_size;
a->obu_type = obu_type;
gf_list_add(cfg->obu_array, a);
if (size<obu_size) {
GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] AV1 config misses %d bytes to fit the entire OBU\n", obu_size - size));
break;
}
size -= (u32) obu_size;
}
gf_av1_reset_state(& state, GF_TRUE);
return cfg;
#else
return NULL;
#endif
}
| null | null | 195,074
|
94331888032186617444846251549702110835
| 83
|
fixed #1895
|
other
|
tensorflow
|
5b491cd5e41ad63735161cec9c2a568172c8b6a3
| 1
|
bool Tensor::FromProto(Allocator* a, const TensorProto& proto) {
CHECK_NOTNULL(a);
TensorBuffer* p = nullptr;
if (!TensorShape::IsValid(proto.tensor_shape())) return false;
if (proto.dtype() == DT_INVALID) return false;
TensorShape shape(proto.tensor_shape());
const int64_t N = shape.num_elements();
if (N > 0 && proto.dtype()) {
bool dtype_error = false;
if (!proto.tensor_content().empty()) {
const auto& content = proto.tensor_content();
CASES_WITH_DEFAULT(proto.dtype(), p = Helper<T>::Decode(a, content, N),
dtype_error = true, dtype_error = true);
} else {
CASES_WITH_DEFAULT(proto.dtype(), p = FromProtoField<T>(a, proto, N),
dtype_error = true, dtype_error = true);
}
if (dtype_error || p == nullptr) return false;
}
shape_ = shape;
set_dtype(proto.dtype());
UnrefIfNonNull(buf_);
buf_ = p;
// TODO(misard) add tracking of which kernels and steps are calling
// FromProto.
if (MemoryLoggingEnabled() && buf_ != nullptr && buf_->data() != nullptr) {
LogMemory::RecordTensorAllocation("Unknown (from Proto)",
LogMemory::UNKNOWN_STEP_ID, *this);
}
return true;
}
| null | null | 195,083
|
293865285807198215508518477303207322028
| 31
|
Validate `proto.dtype()` before calling `set_dtype()`.
This prevents a `DCHECK`-fail when the proto contains an invalid dtype for a tensor shape with 0 elements or for an incomplete tensor shape.
PiperOrigin-RevId: 408369083
Change-Id: Ia21a3e3d62a90d642a4561f08f3b543e5ad00c46
|
other
|
ImageMagick
|
f221ea0fa3171f0f4fdf74ac9d81b203b9534c23
| 1
|
static Image *ReadPCLImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define CropBox "CropBox"
#define DeviceCMYK "DeviceCMYK"
#define MediaBox "MediaBox"
#define RenderPCLText " Rendering PCL... "
char
command[MagickPathExtent],
*density,
filename[MagickPathExtent],
geometry[MagickPathExtent],
*options,
input_filename[MagickPathExtent];
const DelegateInfo
*delegate_info;
Image
*image,
*next_image;
ImageInfo
*read_info;
MagickBooleanType
cmyk,
status;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
char
*p;
ssize_t
c;
SegmentInfo
bounds;
size_t
height,
width;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
/*
Determine page geometry from the PCL media box.
*/
cmyk=image->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
count=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&page,0,sizeof(page));
(void) memset(command,0,sizeof(command));
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if (image_info->page != (char *) NULL)
continue;
/*
Note PCL elements.
*/
*p++=(char) c;
if ((c != (int) '/') && (c != '\n') &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
/*
Set PCL render geometry.
*/
width=(size_t) floor(bounds.x2-bounds.x1+0.5);
height=(size_t) floor(bounds.y2-bounds.y1+0.5);
if (width > page.width)
page.width=width;
if (height > page.height)
page.height=height;
}
(void) CloseBlob(image);
/*
Render PCL with the GhostPCL delegate.
*/
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
(void) FormatLocaleString(geometry,MagickPathExtent,"%.20gx%.20g",(double)
page.width,(double) page.height);
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("pcl:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("pcl:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("pcl:color",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
image=DestroyImage(image);
return((Image *) NULL);
}
if ((page.width == 0) || (page.height == 0))
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
image->resolution.x,image->resolution.y);
if (image_info->ping != MagickFalse)
(void) FormatLocaleString(density,MagickPathExtent,"2.0x2.0");
page.width=(size_t) floor(page.width*image->resolution.x/delta.x+0.5);
page.height=(size_t) floor(page.height*image->resolution.y/delta.y+0.5);
(void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
image=DestroyImage(image);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
if (read_info->number_scenes != 1)
(void) FormatLocaleString(options,MagickPathExtent,"-dLastPage=%.20g",
(double) (read_info->scene+read_info->number_scenes));
else
(void) FormatLocaleString(options,MagickPathExtent,
"-dFirstPage=%.20g -dLastPage=%.20g",(double) read_info->scene+1,
(double) (read_info->scene+read_info->number_scenes));
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) AcquireUniqueFilename(read_info->filename);
(void) FormatLocaleString(command,MagickPathExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,
read_info->filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
status=ExternalDelegateCommand(MagickFalse,read_info->verbose,command,
(char *) NULL,exception) != 0 ? MagickTrue : MagickFalse;
image=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
(void) RelinquishUniqueFileResource(input_filename);
read_info=DestroyImageInfo(read_info);
if (image == (Image *) NULL)
ThrowReaderException(DelegateError,"PCLDelegateFailed");
if (LocaleCompare(image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(image,exception);
if (cmyk_image != (Image *) NULL)
{
image=DestroyImageList(image);
image=cmyk_image;
}
}
do
{
(void) CopyMagickString(image->filename,filename,MagickPathExtent);
image->page=page;
if (image_info->ping != MagickFalse)
{
image->magick_columns*=image->resolution.x/2.0;
image->magick_rows*=image->resolution.y/2.0;
image->columns*=image->resolution.x/2.0;
image->rows*=image->resolution.y/2.0;
}
next_image=SyncNextImageInList(image);
if (next_image != (Image *) NULL)
image=next_image;
} while (next_image != (Image *) NULL);
return(GetFirstImageInList(image));
}
| null | null | 195,237
|
64211844764718959430074467543645938607
| 257
|
Fixes #4985: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299 (#4986)
* fix Division by zero in XMenuWidget() of MagickCore/widget.c
* Fix memory leak in AnimateImageCommand() of MagickWand/animate.c and DisplayImageCommand() of MagickWand/display.c
* fix Division by zero in ReadEnhMetaFile() of coders/emf.c
* Resolve conflicts
* fix issue: outside the range of representable values of type 'unsigned char' at coders/psd.c:1025
* fix error: 4e+26 is outside the range of representable values of type 'unsigned long' at coders/pcl.c:299
Co-authored-by: zhailiangliang <[email protected]>
|
other
|
tensorflow
|
1b54cadd19391b60b6fcccd8d076426f7221d5e8
| 1
|
void Compute(OpKernelContext *ctx) override {
const Tensor *indices_t, *values_t, *shape_t, *dense_t;
OP_REQUIRES_OK(ctx, ctx->input("sp_indices", &indices_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_values", &values_t));
OP_REQUIRES_OK(ctx, ctx->input("sp_shape", &shape_t));
OP_REQUIRES_OK(ctx, ctx->input("dense", &dense_t));
// Validations.
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices_t->shape()),
errors::InvalidArgument(
"Input sp_indices should be a matrix but received shape: ",
indices_t->shape().DebugString()));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(values_t->shape()) &&
TensorShapeUtils::IsVector(shape_t->shape()),
errors::InvalidArgument(
"Inputs sp_values and sp_shape should be vectors "
"but received shapes: ",
values_t->shape().DebugString(), " and ",
shape_t->shape().DebugString()));
OP_REQUIRES(
ctx, values_t->dim_size(0) == indices_t->dim_size(0),
errors::InvalidArgument(
"The first dimension of values and indices should match. (",
values_t->dim_size(0), " vs. ", indices_t->dim_size(0), ")"));
const auto indices_mat = indices_t->matrix<int64_t>();
const auto shape_vec = shape_t->vec<int64_t>();
const auto lhs_dims = BCast::FromShape(TensorShape(shape_vec));
const auto rhs_dims = BCast::FromShape(dense_t->shape());
BCast b(lhs_dims, rhs_dims, false); // false for keeping the same num dims.
// True iff (size(lhs) >= size(rhs)) and all dims in lhs is greater or equal
// to dims in rhs (from right to left).
auto VecGreaterEq = [](ArraySlice<int64_t> lhs, ArraySlice<int64_t> rhs) {
if (lhs.size() < rhs.size()) return false;
for (size_t i = 0; i < rhs.size(); ++i) {
if (lhs[lhs.size() - 1 - i] < rhs[rhs.size() - 1 - i]) return false;
}
return true;
};
OP_REQUIRES(ctx, VecGreaterEq(lhs_dims, rhs_dims) && b.IsValid(),
errors::InvalidArgument(
"SparseDenseBinaryOpShared broadcasts dense to sparse "
"only; got incompatible shapes: [",
absl::StrJoin(lhs_dims, ","), "] vs. [",
absl::StrJoin(rhs_dims, ","), "]"));
Tensor *output_values = nullptr;
Tensor dense_gathered;
const int64_t nnz = indices_t->dim_size(0);
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({nnz}), &output_values));
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, TensorShape({nnz}),
&dense_gathered));
bool op_is_div = false;
if (absl::StrContains(ctx->op_kernel().type_string_view(), "Div")) {
op_is_div = true;
}
// Pulls relevant entries from the dense side, with reshape and broadcasting
// *of the dense side* taken into account. Use a TensorRef to avoid blowing
// up memory.
//
// We can directly use the sparse indices to look up dense side, because
// "b.y_reshape()" and "b.y_bcast()" are guaranteed to have rank "ndims".
auto dense_gathered_flat = dense_gathered.flat<T>();
const int ndims = lhs_dims.size();
switch (ndims) {
#define CASE(NDIM) \
case NDIM: { \
TensorRef<Eigen::Tensor<const T, NDIM, Eigen::RowMajor>> rhs_ref = \
dense_t->shaped<T, NDIM>(b.y_reshape()) \
.broadcast(BCast::ToIndexArray<NDIM>(b.y_bcast())); \
Eigen::array<Eigen::DenseIndex, NDIM> idx; \
bool indices_valid = true; \
for (int i = 0; i < nnz; ++i) { \
for (int d = 0; d < NDIM; ++d) { \
idx[d] = internal::SubtleMustCopy(indices_mat(i, d)); \
if (!FastBoundsCheck(idx[d], rhs_ref.dimension(d))) { \
indices_valid = false; \
} \
} \
OP_REQUIRES( \
ctx, indices_valid, \
errors::InvalidArgument("Provided indices are out-of-bounds w.r.t. " \
"dense side with broadcasted shape")); \
dense_gathered_flat(i) = rhs_ref.coeff(idx); \
if (op_is_div) { \
OP_REQUIRES(ctx, dense_gathered_flat(i) != 0, \
errors::InvalidArgument( \
"SparseDenseCwiseDiv cannot divide by zero," \
"but input dense tensor contains zero ")); \
} \
} \
break; \
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
default:
OP_REQUIRES(
ctx, false,
errors::InvalidArgument("Only tensors with ranks between 1 and 5 "
"are currently supported. Tensor rank: ",
ndims));
#undef CASE
}
output_values->flat<T>().device(ctx->eigen_device<Device>()) =
values_t->flat<T>().binaryExpr(dense_gathered_flat,
typename Functor::func());
}
| null | null | 195,242
|
149037414387774871757110019685380440226
| 116
|
Add missing validation to sparse dense cwise ops.
PiperOrigin-RevId: 415543133
Change-Id: I5baf3284e919338afb96178c468ad3d3cb0d956c
|
other
|
radare2
|
37897226a1a31f982bfefdc4aeefc2e50355c73c
| 1
|
R_API bool r_io_bank_map_add_top(RIO *io, const ut32 bankid, const ut32 mapid) {
RIOBank *bank = r_io_bank_get (io, bankid);
RIOMap *map = r_io_map_get (io, mapid);
r_return_val_if_fail (io && bank && map, false);
RIOMapRef *mapref = _mapref_from_map (map);
if (!mapref) {
return false;
}
RIOSubMap *sm = r_io_submap_new (io, mapref);
if (!sm) {
free (mapref);
return false;
}
RRBNode *entry = _find_entry_submap_node (bank, sm);
if (!entry) {
// no intersection with any submap, so just insert
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
bank->last_used = NULL;
RIOSubMap *bd = (RIOSubMap *)entry->data;
if (r_io_submap_to (bd) == r_io_submap_to (sm) &&
r_io_submap_from (bd) >= r_io_submap_from (sm)) {
// _find_entry_submap_node guarantees, that there is no submap
// prior to bd in the range of sm, so instead of deleting and inserting
// we can just memcpy
memcpy (bd, sm, sizeof (RIOSubMap));
free (sm);
r_list_append (bank->maprefs, mapref);
return true;
}
if (r_io_submap_from (bd) < r_io_submap_from (sm) &&
r_io_submap_to (sm) < r_io_submap_to (bd)) {
// split bd into 2 maps => bd and bdsm
RIOSubMap *bdsm = R_NEWCOPY (RIOSubMap, bd);
if (!bdsm) {
free (sm);
free (mapref);
return false;
}
r_io_submap_set_from (bdsm, r_io_submap_to (sm) + 1);
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
// TODO: insert and check return value, before adjusting sm size
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (bdsm);
free (mapref);
return false;
}
if (!r_crbtree_insert (bank->submaps, bdsm, _find_sm_by_from_vaddr_cb, NULL)) {
r_crbtree_delete (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL);
free (sm);
free (bdsm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
// guaranteed intersection
if (r_io_submap_from (bd) < r_io_submap_from (sm)) {
r_io_submap_set_to (bd, r_io_submap_from (sm) - 1);
entry = r_rbnode_next (entry);
}
while (entry && r_io_submap_to (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
//delete all submaps that are completly included in sm
RRBNode *next = r_rbnode_next (entry);
// this can be optimized, there is no need to do search here
r_crbtree_delete (bank->submaps, entry->data, _find_sm_by_from_vaddr_cb, NULL);
entry = next;
}
if (entry && r_io_submap_from (((RIOSubMap *)entry->data)) <= r_io_submap_to (sm)) {
bd = (RIOSubMap *)entry->data;
r_io_submap_set_from (bd, r_io_submap_to (sm) + 1);
}
if (!r_crbtree_insert (bank->submaps, sm, _find_sm_by_from_vaddr_cb, NULL)) {
free (sm);
free (mapref);
return false;
}
r_list_append (bank->maprefs, mapref);
return true;
}
| null | null | 195,302
|
166117759561842192488491492863600381325
| 89
|
Fix use-after-free in iobank rbtree usage ##io
* See havoc4 bin for reproducer
* Reported via huntr.dev by 'Cen Zhang'
|
other
|
flatpak
|
65cbfac982cb1c83993a9e19aa424daee8e9f042
| 1
|
flatpak_dir_ensure_bundle_remote (FlatpakDir *self,
GFile *file,
GBytes *extra_gpg_data,
FlatpakDecomposed **out_ref,
char **out_checksum,
char **out_metadata,
gboolean *out_created_remote,
GCancellable *cancellable,
GError **error)
{
g_autoptr(FlatpakDecomposed) ref = NULL;
gboolean created_remote = FALSE;
g_autoptr(GBytes) deploy_data = NULL;
g_autoptr(GVariant) metadata = NULL;
g_autofree char *origin = NULL;
g_autofree char *fp_metadata = NULL;
g_autofree char *basename = NULL;
g_autoptr(GBytes) included_gpg_data = NULL;
GBytes *gpg_data = NULL;
g_autofree char *to_checksum = NULL;
g_autofree char *remote = NULL;
g_autofree char *collection_id = NULL;
if (!flatpak_dir_ensure_repo (self, cancellable, error))
return NULL;
metadata = flatpak_bundle_load (file, &to_checksum,
&ref,
&origin,
NULL, &fp_metadata, NULL,
&included_gpg_data,
&collection_id,
error);
if (metadata == NULL)
return NULL;
gpg_data = extra_gpg_data ? extra_gpg_data : included_gpg_data;
deploy_data = flatpak_dir_get_deploy_data (self, ref, FLATPAK_DEPLOY_VERSION_ANY, cancellable, NULL);
if (deploy_data != NULL)
{
remote = g_strdup (flatpak_deploy_data_get_origin (deploy_data));
/* We need to import any gpg keys because otherwise the pull will fail */
if (gpg_data != NULL)
{
g_autoptr(GKeyFile) new_config = NULL;
new_config = ostree_repo_copy_config (flatpak_dir_get_repo (self));
if (!flatpak_dir_modify_remote (self, remote, new_config,
gpg_data, cancellable, error))
return NULL;
}
}
else
{
g_autofree char *id = flatpak_decomposed_dup_id (ref);
/* Add a remote for later updates */
basename = g_file_get_basename (file);
remote = flatpak_dir_create_origin_remote (self,
origin,
id,
basename,
flatpak_decomposed_get_ref (ref),
gpg_data,
collection_id,
&created_remote,
cancellable,
error);
if (remote == NULL)
return NULL;
}
if (out_created_remote)
*out_created_remote = created_remote;
if (out_ref)
*out_ref = g_steal_pointer (&ref);
if (out_checksum)
*out_checksum = g_steal_pointer (&to_checksum);
if (out_metadata)
*out_metadata = g_steal_pointer (&fp_metadata);
return g_steal_pointer (&remote);
}
| null | null | 195,385
|
228799429357940261115009589207592722070
| 89
|
Ensure that bundles have metadata on install
If we have a bundle without metadata we wouldn't properly present
the permissions in the transaction.
|
other
|
v4l2loopback
|
e4cd225557486c420f6a34411f98c575effd43dd
| 1
|
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file);
int labellen = (sizeof(cap->card) < sizeof(dev->card_label)) ?
sizeof(cap->card) :
sizeof(dev->card_label);
int device_nr =
((struct v4l2loopback_private *)video_get_drvdata(dev->vdev))
->device_nr;
__u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
strlcpy(cap->driver, "v4l2 loopback", sizeof(cap->driver));
snprintf(cap->card, labellen, dev->card_label);
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:v4l2loopback-%03d", device_nr);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
/* since 3.1.0, the v4l2-core system is supposed to set the version */
cap->version = V4L2LOOPBACK_VERSION_CODE;
#endif
#ifdef V4L2_CAP_VIDEO_M2M
capabilities |= V4L2_CAP_VIDEO_M2M;
#endif /* V4L2_CAP_VIDEO_M2M */
if (dev->announce_all_caps) {
capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
} else {
if (dev->ready_for_capture) {
capabilities |= V4L2_CAP_VIDEO_CAPTURE;
}
if (dev->ready_for_output) {
capabilities |= V4L2_CAP_VIDEO_OUTPUT;
}
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
dev->vdev->device_caps =
#endif /* >=linux-4.7.0 */
cap->device_caps = cap->capabilities = capabilities;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
cap->capabilities |= V4L2_CAP_DEVICE_CAPS;
#endif
memset(cap->reserved, 0, sizeof(cap->reserved));
return 0;
}
| null | null | 195,398
|
315173664575668559594492834827107741837
| 49
|
add explicit format specifier to printf() invocations
CWE-134
|
other
|
tensorflow
|
a1e1511dde36b3f8aa27a6ec630838e7ea40e091
| 1
|
int TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size;
#if defined(_MSC_VER)
// Context for why this is needed is in http://b/189926408#comment21
computed_size -= sizeof(dummy.data[0]);
#endif
return computed_size;
}
| null | null | 195,402
|
57995846542337948463298638540979134332
| 10
|
[lite] Update TfLiteIntArrayCreate to return size_t
PiperOrigin-RevId: 416439896
Change-Id: I847f69b68d1ddaff4b1e925a09b8b69c1756653b
|
other
|
hhvm
|
dabd48caf74995e605f1700344f1ff4a5d83441d
| 1
|
bool JSON_parser(Variant &z, const char *p, int length, bool const assoc,
int depth, int64_t options) {
// No GC safepoints during JSON parsing, please. Code is not re-entrant.
NoHandleSurpriseScope no_surprise(SafepointFlags);
json_parser *json = s_json_parser.get(); /* the parser state */
// Clear and reuse the thread-local string buffers. They are only freed if
// they exceed kMaxPersistentStringBufferCapacity at exit or if the thread
// is explicitly flushed (e.g., due to being idle).
json->initSb(length);
SCOPE_EXIT {
constexpr int kMaxPersistentStringBufferCapacity = 256 * 1024;
if (json->sb_cap > kMaxPersistentStringBufferCapacity) json->flushSb();
};
// SimpleParser only handles the most common set of options. Also, only use it
// if its array nesting depth check is *more* restrictive than what the user
// asks for, to ensure that the precise semantics of the general case is
// applied for all nesting overflows.
if (assoc &&
options == (options & (k_JSON_FB_LOOSE |
k_JSON_FB_DARRAYS |
k_JSON_FB_DARRAYS_AND_VARRAYS |
k_JSON_FB_HACK_ARRAYS |
k_JSON_FB_THRIFT_SIMPLE_JSON |
k_JSON_FB_LEGACY_HACK_ARRAYS)) &&
depth >= SimpleParser::kMaxArrayDepth &&
length <= RuntimeOption::EvalSimpleJsonMaxLength &&
SimpleParser::TryParse(p, length, json->tl_buffer.tv, z,
get_container_type_from_options(options),
options & k_JSON_FB_THRIFT_SIMPLE_JSON)) {
return true;
}
int b; /* the next character */
int c; /* the next character class */
int s; /* the next state */
int state = 0;
/*<fb>*/
bool const loose = options & k_JSON_FB_LOOSE;
JSONContainerType const container_type =
get_container_type_from_options(options);
int qchr = 0;
int8_t const *byte_class;
int8_t const (*next_state_table)[32];
if (loose) {
byte_class = loose_ascii_class;
next_state_table = loose_state_transition_table;
} else {
byte_class = ascii_class;
next_state_table = state_transition_table;
}
/*</fb>*/
UncheckedBuffer *buf = &json->sb_buf;
UncheckedBuffer *key = &json->sb_key;
DataType type = kInvalidDataType;
unsigned short escaped_bytes = 0;
auto reset_type = [&] { type = kInvalidDataType; };
json->depth = depth;
// Since the stack is maintainined on a per request basis, for performance
// reasons, it only makes sense to expand if necessary and cycles are wasted
// contracting. Calls with a depth other than default should be rare.
if (depth > json->stack.size()) {
json->stack.resize(depth);
}
SCOPE_EXIT {
if (json->stack.empty()) return;
for (int i = 0; i <= json->mark; i++) {
json->stack[i].key.reset();
json->stack[i].val.unset();
}
json->mark = -1;
};
json->mark = json->top = -1;
push(json, Mode::DONE);
UTF8To16Decoder decoder(p, length, loose);
for (;;) {
b = decoder.decode();
// Fast-case most common transition: append a simple string character.
if (state == 3 && type == KindOfString) {
while (b != '\"' && b != '\\' && b != '\'' && b <= 127 && b >= ' ') {
buf->append((char)b);
b = decoder.decode();
}
}
if (b == UTF8_END) break; // UTF-8 decoding finishes successfully.
if (b == UTF8_ERROR) {
s_json_parser->error_code = JSON_ERROR_UTF8;
return false;
}
assertx(b >= 0);
if ((b & 127) == b) {
/*<fb>*/
c = byte_class[b];
/*</fb>*/
if (c <= S_ERR) {
s_json_parser->error_code = JSON_ERROR_CTRL_CHAR;
return false;
}
} else {
c = S_ETC;
}
/*
Get the next state from the transition table.
*/
/*<fb>*/
s = next_state_table[state][c];
if (s == -4) {
if (b != qchr) {
s = 3;
} else {
qchr = 0;
}
}
/*</fb>*/
if (s < 0) {
/*
Perform one of the predefined actions.
*/
switch (s) {
/*
empty }
*/
case -9:
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::KEY)) {
return false;
}
state = 9;
break;
/*
{
*/
case -8:
if (!push(json, Mode::KEY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 1;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
// stable_maps is meaningless
top = req::make<c_Map>();
} else {
/*</fb>*/
if (!assoc) {
top = SystemLib::AllocStdClassObject();
/* <fb> */
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateDict();
} else if (container_type == JSONContainerType::DARRAYS ||
container_type == JSONContainerType::DARRAYS_AND_VARRAYS)
{
top = Array::CreateDArray();
/* </fb> */
} else if (
container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyDictArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*<fb>*/
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
}
*/
case -7:
/*** BEGIN Facebook: json_utf8_loose ***/
/*
If this is a trailing comma in an object definition,
we're in Mode::KEY. In that case, throw that off the
stack and restore Mode::OBJECT so that we pretend the
trailing comma just didn't happen.
*/
if (loose) {
if (pop(json, Mode::KEY)) {
push(json, Mode::OBJECT);
}
}
/*** END Facebook: json_utf8_loose ***/
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::OBJECT) {
Variant mval;
json_create_zval(mval, *buf, type, options);
Variant &top = json->stack[json->top].val;
object_set(json, top, copy_and_clear(*key),
mval, assoc, container_type);
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key,
assoc, container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::OBJECT)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
break;
/*
[
*/
case -6:
if (!push(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_DEPTH;
return false;
}
state = 2;
if (json->top > 0) {
Variant &top = json->stack[json->top].val;
/*<fb>*/
if (container_type == JSONContainerType::COLLECTIONS) {
top = req::make<c_Vector>();
} else if (container_type == JSONContainerType::HACK_ARRAYS) {
top = Array::CreateVec();
} else if (container_type == JSONContainerType::DARRAYS_AND_VARRAYS) {
top = Array::CreateVArray();
} else if (container_type == JSONContainerType::DARRAYS) {
top = Array::CreateDArray();
} else if (container_type == JSONContainerType::LEGACY_HACK_ARRAYS) {
auto arr = staticEmptyVecArray()->copy();
arr->setLegacyArray(true);
top = arr;
} else {
top = Array::CreateDArray();
}
/*</fb>*/
json->stack[json->top].key = copy_and_clear(*key);
reset_type();
}
break;
/*
]
*/
case -5:
{
if (type != kInvalidDataType &&
json->stack[json->top].mode == Mode::ARRAY) {
Variant mval;
json_create_zval(mval, *buf, type, options);
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
buf->clear();
reset_type();
}
/*<fb>*/
if (json->top == 1) z = json->stack[json->top].val;
else {
/*</fb>*/
attach_zval(json, json->stack[json->top].key, assoc,
container_type);
/*<fb>*/
}
/*</fb>*/
if (!pop(json, Mode::ARRAY)) {
s_json_parser->error_code = JSON_ERROR_STATE_MISMATCH;
return false;
}
state = 9;
}
break;
/*
"
*/
case -4:
switch (json->stack[json->top].mode) {
case Mode::KEY:
state = 27;
std::swap(buf, key);
reset_type();
break;
case Mode::ARRAY:
case Mode::OBJECT:
state = 9;
break;
case Mode::DONE:
if (type == KindOfString) {
z = copy_and_clear(*buf);
state = 9;
break;
}
/* fall through if not KindOfString */
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
break;
/*
,
*/
case -3:
{
Variant mval;
if (type != kInvalidDataType &&
(json->stack[json->top].mode == Mode::OBJECT ||
json->stack[json->top].mode == Mode::ARRAY)) {
json_create_zval(mval, *buf, type, options);
}
switch (json->stack[json->top].mode) {
case Mode::OBJECT:
if (pop(json, Mode::OBJECT) &&
push(json, Mode::KEY)) {
if (type != kInvalidDataType) {
Variant &top = json->stack[json->top].val;
object_set(
json,
top,
copy_and_clear(*key),
mval,
assoc,
container_type
);
}
state = 29;
}
break;
case Mode::ARRAY:
if (type != kInvalidDataType) {
auto& top = json->stack[json->top].val;
if (container_type == JSONContainerType::COLLECTIONS) {
collections::append(top.getObjectData(), mval.asTypedValue());
} else {
top.asArrRef().append(mval);
}
}
state = 28;
break;
default:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
buf->clear();
reset_type();
check_non_safepoint_surprise();
}
break;
/*<fb>*/
/*
: (after unquoted string)
*/
case -10:
if (json->stack[json->top].mode == Mode::KEY) {
state = 27;
std::swap(buf, key);
reset_type();
s = -2;
} else {
s = 3;
break;
}
/*</fb>*/
/*
:
*/
case -2:
if (pop(json, Mode::KEY) && push(json, Mode::OBJECT)) {
state = 28;
break;
}
/*
syntax error
*/
case -1:
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
/*
Change the state and iterate.
*/
bool is_tsimplejson = options & k_JSON_FB_THRIFT_SIMPLE_JSON;
if (type == KindOfString) {
if (/*<fb>*/(/*</fb>*/s == 3/*<fb>*/ || s == 30)/*</fb>*/ &&
state != 8) {
if (state != 4) {
utf16_to_utf8(*buf, b);
} else {
switch (b) {
case 'b': buf->append('\b'); break;
case 't': buf->append('\t'); break;
case 'n': buf->append('\n'); break;
case 'f': buf->append('\f'); break;
case 'r': buf->append('\r'); break;
default:
utf16_to_utf8(*buf, b);
break;
}
}
} else if (s == 6) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
escaped_bytes = 0;
} else {
escaped_bytes = dehexchar(b) << 12;
}
} else if (s == 7) {
if (UNLIKELY(is_tsimplejson)) {
if (UNLIKELY(b != '0')) {
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
} else {
escaped_bytes += dehexchar(b) << 8;
}
} else if (s == 8) {
escaped_bytes += dehexchar(b) << 4;
} else if (s == 3 && state == 8) {
escaped_bytes += dehexchar(b);
if (UNLIKELY(is_tsimplejson)) {
buf->append((char)escaped_bytes);
} else {
utf16_to_utf8(*buf, escaped_bytes);
}
}
} else if ((type == kInvalidDataType || type == KindOfNull) &&
(c == S_DIG || c == S_ZER)) {
type = KindOfInt64;
buf->append((char)b);
} else if (type == KindOfInt64 && s == 24) {
type = KindOfDouble;
buf->append((char)b);
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64) &&
c == S_DOT) {
type = KindOfDouble;
buf->append((char)b);
} else if (type != KindOfString && c == S_QUO) {
type = KindOfString;
/*<fb>*/qchr = b;/*</fb>*/
} else if ((type == kInvalidDataType || type == KindOfNull ||
type == KindOfInt64 || type == KindOfDouble) &&
((state == 12 && s == 9) ||
(state == 16 && s == 9))) {
type = KindOfBoolean;
} else if (type == kInvalidDataType && state == 19 && s == 9) {
type = KindOfNull;
} else if (type != KindOfString && c > S_WSP) {
utf16_to_utf8(*buf, b);
}
state = s;
}
}
if (state == 9 && pop(json, Mode::DONE)) {
s_json_parser->error_code = JSON_ERROR_NONE;
return true;
}
s_json_parser->error_code = JSON_ERROR_SYNTAX;
return false;
}
| null | null | 195,549
|
230969768529717165037239975546250419627
| 499
|
Fix a json_decode crash when depth==0
Summary:
Setting depth=0 is an error, and should result in NULL, but we weren't
checking for it, so in the case of a single, top-level string, we
would reading the -1th element of the stack.
Differential Revision: D19609959
fbshipit-source-id: 04ca1e0965e04b44df2d5c806a73c3da99ff66fb
|
other
|
hhvm
|
1888810e77b446a79a7674784d5f139fcfa605e2
| 1
|
bool WddxPacket::recursiveAddVar(const String& varName,
const Variant& varVariant,
bool hasVarTag) {
bool isArray = varVariant.isArray();
bool isObject = varVariant.isObject();
if (isArray || isObject) {
if (hasVarTag) {
m_packetString.append("<var name='");
m_packetString.append(varName.data());
m_packetString.append("'>");
}
Array varAsArray;
Object varAsObject = varVariant.toObject();
if (isArray) varAsArray = varVariant.toArray();
if (isObject) varAsArray = varAsObject.toArray();
int length = varAsArray.length();
if (length > 0) {
ArrayIter it = ArrayIter(varAsArray);
if (it.first().isString()) isObject = true;
if (isObject) {
m_packetString.append("<struct>");
if (!isArray) {
m_packetString.append("<var name='php_class_name'><string>");
m_packetString.append(varAsObject->getClassName());
m_packetString.append("</string></var>");
}
} else {
m_packetString.append("<array length='");
m_packetString.append(std::to_string(length));
m_packetString.append("'>");
}
for (ArrayIter it(varAsArray); it; ++it) {
Variant key = it.first();
Variant value = it.second();
recursiveAddVar(key.toString(), value, isObject);
}
if (isObject) {
m_packetString.append("</struct>");
}
else {
m_packetString.append("</array>");
}
}
else {
//empty object
if (isObject) {
m_packetString.append("<struct>");
if (!isArray) {
m_packetString.append("<var name='php_class_name'><string>");
m_packetString.append(varAsObject->getClassName());
m_packetString.append("</string></var>");
}
m_packetString.append("</struct>");
}
}
if (hasVarTag) {
m_packetString.append("</var>");
}
return true;
}
String varType = getDataTypeString(varVariant.getType());
if (!getWddxEncoded(varType, "", varName, false).empty()) {
String varValue;
if (varType.compare("boolean") == 0) {
varValue = varVariant.toBoolean() ? "true" : "false";
} else {
varValue = StringUtil::HtmlEncode(varVariant.toString(),
StringUtil::QuoteStyle::Double,
"UTF-8", false, false).toCppString();
}
m_packetString.append(
getWddxEncoded(varType, varValue, varName, hasVarTag));
return true;
}
return false;
}
| null | null | 195,551
|
257002953542231292524849386209084454732
| 82
|
Fix infinite recursion in wddx
Summary:
It wasn't checking for infinite recursion due to references or self-referential
objects. As it turns out closures always return themselves when converted to an
array. Raising a warning and returning is how PHP-src deals with this problem,
nothing special is done for closures.
Reviewed By: alexmalyshev
Differential Revision: D3465655
fbshipit-source-id: a42bc34d30cf4825faf33596139c0c05f8e4f5f1
|
other
|
pjproject
|
856f87c2e97a27b256482dbe0d748b1194355a21
| 1
|
static pj_xml_node *xml_parse_node( pj_pool_t *pool, pj_scanner *scanner)
{
pj_xml_node *node;
pj_str_t end_name;
PJ_CHECK_STACK();
if (*scanner->curptr != '<')
on_syntax_error(scanner);
/* Handle Processing Instructino (PI) construct (i.e. "<?") */
if (*scanner->curptr == '<' && *(scanner->curptr+1) == '?') {
pj_scan_advance_n(scanner, 2, PJ_FALSE);
for (;;) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, '?', &dummy);
if (*scanner->curptr=='?' && *(scanner->curptr+1)=='>') {
pj_scan_advance_n(scanner, 2, PJ_TRUE);
break;
} else {
pj_scan_advance_n(scanner, 1, PJ_FALSE);
}
}
return xml_parse_node(pool, scanner);
}
/* Handle comments construct (i.e. "<!") */
if (pj_scan_strcmp(scanner, "<!", 2) == 0) {
pj_scan_advance_n(scanner, 2, PJ_FALSE);
for (;;) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, '>', &dummy);
if (pj_scan_strcmp(scanner, ">", 1) == 0) {
pj_scan_advance_n(scanner, 1, PJ_TRUE);
break;
} else {
pj_scan_advance_n(scanner, 1, PJ_FALSE);
}
}
return xml_parse_node(pool, scanner);
}
/* Alloc node. */
node = alloc_node(pool);
/* Get '<' */
pj_scan_get_char(scanner);
/* Get node name. */
pj_scan_get_until_chr( scanner, " />\t\r\n", &node->name);
/* Get attributes. */
while (*scanner->curptr != '>' && *scanner->curptr != '/') {
pj_xml_attr *attr = alloc_attr(pool);
pj_scan_get_until_chr( scanner, "=> \t\r\n", &attr->name);
if (*scanner->curptr == '=') {
pj_scan_get_char( scanner );
pj_scan_get_quotes(scanner, "\"'", "\"'", 2, &attr->value);
/* remove quote characters */
++attr->value.ptr;
attr->value.slen -= 2;
}
pj_list_push_back( &node->attr_head, attr );
}
if (*scanner->curptr == '/') {
pj_scan_get_char(scanner);
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
return node;
}
/* Enclosing bracket. */
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
/* Sub nodes. */
while (*scanner->curptr == '<' && *(scanner->curptr+1) != '/'
&& *(scanner->curptr+1) != '!')
{
pj_xml_node *sub_node = xml_parse_node(pool, scanner);
pj_list_push_back( &node->node_head, sub_node );
}
/* Content. */
if (!pj_scan_is_eof(scanner) && *scanner->curptr != '<') {
pj_scan_get_until_ch(scanner, '<', &node->content);
}
/* CDATA content. */
if (*scanner->curptr == '<' && *(scanner->curptr+1) == '!' &&
pj_scan_strcmp(scanner, "<![CDATA[", 9) == 0)
{
pj_scan_advance_n(scanner, 9, PJ_FALSE);
pj_scan_get_until_ch(scanner, ']', &node->content);
while (pj_scan_strcmp(scanner, "]]>", 3)) {
pj_str_t dummy;
pj_scan_get_until_ch(scanner, ']', &dummy);
}
node->content.slen = scanner->curptr - node->content.ptr;
pj_scan_advance_n(scanner, 3, PJ_TRUE);
}
/* Enclosing node. */
if (pj_scan_get_char(scanner) != '<' || pj_scan_get_char(scanner) != '/')
on_syntax_error(scanner);
pj_scan_get_until_chr(scanner, " \t>", &end_name);
/* Compare name. */
if (pj_stricmp(&node->name, &end_name) != 0)
on_syntax_error(scanner);
/* Enclosing '>' */
if (pj_scan_get_char(scanner) != '>')
on_syntax_error(scanner);
return node;
}
| null | null | 195,670
|
137312951595763938703339557990324963725
| 121
|
Merge pull request from GHSA-5x45-qp78-g4p4
* Prevent infinite loop in scanning xml content
* Simplify scanning method
* Optimization
|
other
|
glibc
|
23e0e8f5f1fb5ed150253d986ecccdc90c2dcd5e
| 1
|
__getcwd_generic (char *buf, size_t size)
{
/* Lengths of big file name components and entire file names, and a
deep level of file name nesting. These numbers are not upper
bounds; they are merely large values suitable for initial
allocations, designed to be large enough for most real-world
uses. */
enum
{
BIG_FILE_NAME_COMPONENT_LENGTH = 255,
BIG_FILE_NAME_LENGTH = MIN (4095, PATH_MAX - 1),
DEEP_NESTING = 100
};
#if HAVE_OPENAT_SUPPORT
int fd = AT_FDCWD;
bool fd_needs_closing = false;
#else
char dots[DEEP_NESTING * sizeof ".." + BIG_FILE_NAME_COMPONENT_LENGTH + 1];
char *dotlist = dots;
size_t dotsize = sizeof dots;
size_t dotlen = 0;
#endif
DIR *dirstream = NULL;
dev_t rootdev, thisdev;
ino_t rootino, thisino;
char *dir;
register char *dirp;
struct __stat64_t64 st;
size_t allocated = size;
size_t used;
#if HAVE_MINIMALLY_WORKING_GETCWD
/* If AT_FDCWD is not defined, the algorithm below is O(N**2) and
this is much slower than the system getcwd (at least on
GNU/Linux). So trust the system getcwd's results unless they
look suspicious.
Use the system getcwd even if we have openat support, since the
system getcwd works even when a parent is unreadable, while the
openat-based approach does not.
But on AIX 5.1..7.1, the system getcwd is not even minimally
working: If the current directory name is slightly longer than
PATH_MAX, it omits the first directory component and returns
this wrong result with errno = 0. */
# undef getcwd
dir = getcwd_system (buf, size);
if (dir || (size && errno == ERANGE))
return dir;
/* Solaris getcwd (NULL, 0) fails with errno == EINVAL, but it has
internal magic that lets it work even if an ancestor directory is
inaccessible, which is better in many cases. So in this case try
again with a buffer that's almost always big enough. */
if (errno == EINVAL && buf == NULL && size == 0)
{
char big_buffer[BIG_FILE_NAME_LENGTH + 1];
dir = getcwd_system (big_buffer, sizeof big_buffer);
if (dir)
return strdup (dir);
}
# if HAVE_PARTLY_WORKING_GETCWD
/* The system getcwd works, except it sometimes fails when it
shouldn't, setting errno to ERANGE, ENAMETOOLONG, or ENOENT. */
if (errno != ERANGE && errno != ENAMETOOLONG && errno != ENOENT)
return NULL;
# endif
#endif
if (size == 0)
{
if (buf != NULL)
{
__set_errno (EINVAL);
return NULL;
}
allocated = BIG_FILE_NAME_LENGTH + 1;
}
if (buf == NULL)
{
dir = malloc (allocated);
if (dir == NULL)
return NULL;
}
else
dir = buf;
dirp = dir + allocated;
*--dirp = '\0';
if (__lstat64_time64 (".", &st) < 0)
goto lose;
thisdev = st.st_dev;
thisino = st.st_ino;
if (__lstat64_time64 ("/", &st) < 0)
goto lose;
rootdev = st.st_dev;
rootino = st.st_ino;
while (!(thisdev == rootdev && thisino == rootino))
{
struct dirent64 *d;
dev_t dotdev;
ino_t dotino;
bool mount_point;
int parent_status;
size_t dirroom;
size_t namlen;
bool use_d_ino = true;
/* Look at the parent directory. */
#if HAVE_OPENAT_SUPPORT
fd = __openat64 (fd, "..", O_RDONLY);
if (fd < 0)
goto lose;
fd_needs_closing = true;
parent_status = __fstat64_time64 (fd, &st);
#else
dotlist[dotlen++] = '.';
dotlist[dotlen++] = '.';
dotlist[dotlen] = '\0';
parent_status = __lstat64_time64 (dotlist, &st);
#endif
if (parent_status != 0)
goto lose;
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
/* Figure out if this directory is a mount point. */
dotdev = st.st_dev;
dotino = st.st_ino;
mount_point = dotdev != thisdev;
/* Search for the last directory. */
#if HAVE_OPENAT_SUPPORT
dirstream = __fdopendir (fd);
if (dirstream == NULL)
goto lose;
fd_needs_closing = false;
#else
dirstream = __opendir (dotlist);
if (dirstream == NULL)
goto lose;
dotlist[dotlen++] = '/';
#endif
for (;;)
{
/* Clear errno to distinguish EOF from error if readdir returns
NULL. */
__set_errno (0);
d = __readdir64 (dirstream);
/* When we've iterated through all directory entries without finding
one with a matching d_ino, rewind the stream and consider each
name again, but this time, using lstat. This is necessary in a
chroot on at least one system (glibc-2.3.6 + linux 2.6.12), where
.., ../.., ../../.., etc. all had the same device number, yet the
d_ino values for entries in / did not match those obtained
via lstat. */
if (d == NULL && errno == 0 && use_d_ino)
{
use_d_ino = false;
__rewinddir (dirstream);
d = __readdir64 (dirstream);
}
if (d == NULL)
{
if (errno == 0)
/* EOF on dirstream, which can mean e.g., that the current
directory has been removed. */
__set_errno (ENOENT);
goto lose;
}
if (d->d_name[0] == '.' &&
(d->d_name[1] == '\0' ||
(d->d_name[1] == '.' && d->d_name[2] == '\0')))
continue;
if (use_d_ino)
{
bool match = (MATCHING_INO (d, thisino) || mount_point);
if (! match)
continue;
}
{
int entry_status;
#if HAVE_OPENAT_SUPPORT
entry_status = __fstatat64_time64 (fd, d->d_name, &st,
AT_SYMLINK_NOFOLLOW);
#else
/* Compute size needed for this file name, or for the file
name ".." in the same directory, whichever is larger.
Room for ".." might be needed the next time through
the outer loop. */
size_t name_alloc = _D_ALLOC_NAMLEN (d);
size_t filesize = dotlen + MAX (sizeof "..", name_alloc);
if (filesize < dotlen)
goto memory_exhausted;
if (dotsize < filesize)
{
/* My, what a deep directory tree you have, Grandma. */
size_t newsize = MAX (filesize, dotsize * 2);
size_t i;
if (newsize < dotsize)
goto memory_exhausted;
if (dotlist != dots)
free (dotlist);
dotlist = malloc (newsize);
if (dotlist == NULL)
goto lose;
dotsize = newsize;
i = 0;
do
{
dotlist[i++] = '.';
dotlist[i++] = '.';
dotlist[i++] = '/';
}
while (i < dotlen);
}
memcpy (dotlist + dotlen, d->d_name, _D_ALLOC_NAMLEN (d));
entry_status = __lstat64_time64 (dotlist, &st);
#endif
/* We don't fail here if we cannot stat() a directory entry.
This can happen when (network) file systems fail. If this
entry is in fact the one we are looking for we will find
out soon as we reach the end of the directory without
having found anything. */
if (entry_status == 0 && S_ISDIR (st.st_mode)
&& st.st_dev == thisdev && st.st_ino == thisino)
break;
}
}
dirroom = dirp - dir;
namlen = _D_EXACT_NAMLEN (d);
if (dirroom <= namlen)
{
if (size != 0)
{
__set_errno (ERANGE);
goto lose;
}
else
{
char *tmp;
size_t oldsize = allocated;
allocated += MAX (allocated, namlen);
if (allocated < oldsize
|| ! (tmp = realloc (dir, allocated)))
goto memory_exhausted;
/* Move current contents up to the end of the buffer.
This is guaranteed to be non-overlapping. */
dirp = memcpy (tmp + allocated - (oldsize - dirroom),
tmp + dirroom,
oldsize - dirroom);
dir = tmp;
}
}
dirp -= namlen;
memcpy (dirp, d->d_name, namlen);
*--dirp = '/';
thisdev = dotdev;
thisino = dotino;
}
if (dirstream && __closedir (dirstream) != 0)
{
dirstream = NULL;
goto lose;
}
if (dirp == &dir[allocated - 1])
*--dirp = '/';
#if ! HAVE_OPENAT_SUPPORT
if (dotlist != dots)
free (dotlist);
#endif
used = dir + allocated - dirp;
memmove (dir, dirp, used);
if (size == 0)
/* Ensure that the buffer is only as large as necessary. */
buf = (used < allocated ? realloc (dir, used) : dir);
if (buf == NULL)
/* Either buf was NULL all along, or 'realloc' failed but
we still have the original string. */
buf = dir;
return buf;
memory_exhausted:
__set_errno (ENOMEM);
lose:
{
int save = errno;
if (dirstream)
__closedir (dirstream);
#if HAVE_OPENAT_SUPPORT
if (fd_needs_closing)
__close_nocancel_nostatus (fd);
#else
if (dotlist != dots)
free (dotlist);
#endif
if (buf == NULL)
free (dir);
__set_errno (save);
}
return NULL;
}
| null | null | 195,716
|
204737856749159993041789176797064964819
| 333
|
getcwd: Set errno to ERANGE for size == 1 (CVE-2021-3999)
No valid path returned by getcwd would fit into 1 byte, so reject the
size early and return NULL with errno set to ERANGE. This change is
prompted by CVE-2021-3999, which describes a single byte buffer
underflow and overflow when all of the following conditions are met:
- The buffer size (i.e. the second argument of getcwd) is 1 byte
- The current working directory is too long
- '/' is also mounted on the current working directory
Sequence of events:
- In sysdeps/unix/sysv/linux/getcwd.c, the syscall returns ENAMETOOLONG
because the linux kernel checks for name length before it checks
buffer size
- The code falls back to the generic getcwd in sysdeps/posix
- In the generic func, the buf[0] is set to '\0' on line 250
- this while loop on line 262 is bypassed:
while (!(thisdev == rootdev && thisino == rootino))
since the rootfs (/) is bind mounted onto the directory and the flow
goes on to line 449, where it puts a '/' in the byte before the
buffer.
- Finally on line 458, it moves 2 bytes (the underflowed byte and the
'\0') to the buf[0] and buf[1], resulting in a 1 byte buffer overflow.
- buf is returned on line 469 and errno is not set.
This resolves BZ #28769.
Reviewed-by: Andreas Schwab <[email protected]>
Reviewed-by: Adhemerval Zanella <[email protected]>
Signed-off-by: Qualys Security Advisory <[email protected]>
Signed-off-by: Siddhesh Poyarekar <[email protected]>
|
other
|
mvfst
|
a67083ff4b8dcbb7ee2839da6338032030d712b0
| 1
|
void updateHandshakeState(QuicServerConnectionState& conn) {
// Zero RTT read cipher is available after chlo is processed with the
// condition that early data attempt is accepted.
auto handshakeLayer = conn.serverHandshakeLayer;
auto zeroRttReadCipher = handshakeLayer->getZeroRttReadCipher();
auto zeroRttHeaderCipher = handshakeLayer->getZeroRttReadHeaderCipher();
// One RTT write cipher is available at Fizz layer after chlo is processed.
// However, the cipher is only exported to QUIC if early data attempt is
// accepted. Otherwise, the cipher will be available after cfin is
// processed.
auto oneRttWriteCipher = handshakeLayer->getOneRttWriteCipher();
// One RTT read cipher is available after cfin is processed.
auto oneRttReadCipher = handshakeLayer->getOneRttReadCipher();
auto oneRttWriteHeaderCipher = handshakeLayer->getOneRttWriteHeaderCipher();
auto oneRttReadHeaderCipher = handshakeLayer->getOneRttReadHeaderCipher();
if (zeroRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedZeroRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 0-rtt read cipher");
conn.readCodec->setZeroRttReadCipher(std::move(zeroRttReadCipher));
}
if (zeroRttHeaderCipher) {
conn.readCodec->setZeroRttHeaderCipher(std::move(zeroRttHeaderCipher));
}
if (oneRttWriteHeaderCipher) {
conn.oneRttWriteHeaderCipher = std::move(oneRttWriteHeaderCipher);
}
if (oneRttReadHeaderCipher) {
conn.readCodec->setOneRttHeaderCipher(std::move(oneRttReadHeaderCipher));
}
if (oneRttWriteCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttWriteCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt write cipher");
CHECK(!conn.oneRttWriteCipher.get());
conn.oneRttWriteCipher = std::move(oneRttWriteCipher);
updatePacingOnKeyEstablished(conn);
// We negotiate the transport parameters whenever we have the 1-RTT write
// keys available.
auto clientParams = handshakeLayer->getClientTransportParams();
if (!clientParams) {
throw QuicTransportException(
"No client transport params",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
processClientInitialParams(conn, std::move(*clientParams));
}
if (oneRttReadCipher) {
if (conn.qLogger) {
conn.qLogger->addTransportStateUpdate(kDerivedOneRttReadCipher);
}
QUIC_TRACE(fst_trace, conn, "derived 1-rtt read cipher");
// Clear limit because CFIN is received at this point
conn.writableBytesLimit = folly::none;
conn.readCodec->setOneRttReadCipher(std::move(oneRttReadCipher));
}
auto handshakeReadCipher = handshakeLayer->getHandshakeReadCipher();
auto handshakeReadHeaderCipher =
handshakeLayer->getHandshakeReadHeaderCipher();
if (handshakeReadCipher) {
CHECK(handshakeReadHeaderCipher);
conn.readCodec->setHandshakeReadCipher(std::move(handshakeReadCipher));
conn.readCodec->setHandshakeHeaderCipher(
std::move(handshakeReadHeaderCipher));
}
if (handshakeLayer->isHandshakeDone()) {
CHECK(conn.oneRttWriteCipher);
if (conn.version != QuicVersion::MVFST_D24 && !conn.sentHandshakeDone) {
sendSimpleFrame(conn, HandshakeDoneFrame());
conn.sentHandshakeDone = true;
}
}
}
| null | null | 195,720
|
181509801354210897629105034468408496700
| 80
|
Close connection if we derive an extra 1-rtt write cipher
Summary: Fixes CVE-2021-24029
Reviewed By: mjoras, lnicco
Differential Revision: D26613890
fbshipit-source-id: 19bb2be2c731808144e1a074ece313fba11f1945
|
other
|
tensorflow
|
02cc160e29d20631de3859c6653184e3f876b9d7
| 1
|
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
// Create a new SparseTensorSliceDatasetOp::Dataset, insert it in
// the step container, and return it as the output.
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument(
"Input indices should be a matrix but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument(
"Input values should be a vector but received shape ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument(
"Input shape should be a vector but received shape ",
dense_shape->shape().DebugString()));
// We currently ensure that `sparse_tensor` is ordered in the
// batch dimension.
// TODO(mrry): Investigate ways to avoid this unconditional check
// if we can be sure that the sparse tensor was produced in an
// appropriate order (e.g. by `tf.parse_example()` or a Dataset
// that batches elements into rows of a SparseTensor).
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
sparse::SparseTensor tensor;
OP_REQUIRES_OK(
ctx, sparse::SparseTensor::Create(
*indices, *values, TensorShape(dense_shape->vec<int64>()),
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
| null | null | 195,752
|
171363723210961322415209119251133937799
| 47
|
Prevent nullptr deref in SparseTensorSliceDataset
The arguments must determine a valid sparse tensor. This means that when indices are empty then the values must be empty too (and the reverse).
Also added test, by modifying existing test with empty sparse tensor to now run with an invalid sparse tensor input.
PiperOrigin-RevId: 388562757
Change-Id: Id8b54cd7c2316025b4f9a77292c8fb5344d17609
|
other
|
php-src
|
0c8a2a2cd1056b7dc403eacb5d2c0eec6ce47c6f
| 1
|
*/
static void php_wddx_pop_element(void *user_data, const XML_Char *name)
{
st_entry *ent1, *ent2;
wddx_stack *stack = (wddx_stack *)user_data;
HashTable *target_hash;
zend_class_entry *pce;
zval obj;
/* OBJECTS_FIXME */
if (stack->top == 0) {
return;
}
if (!strcmp((char *)name, EL_STRING) || !strcmp((char *)name, EL_NUMBER) ||
!strcmp((char *)name, EL_BOOLEAN) || !strcmp((char *)name, EL_NULL) ||
!strcmp((char *)name, EL_ARRAY) || !strcmp((char *)name, EL_STRUCT) ||
!strcmp((char *)name, EL_RECORDSET) || !strcmp((char *)name, EL_BINARY) ||
!strcmp((char *)name, EL_DATETIME)) {
wddx_stack_top(stack, (void**)&ent1);
if (Z_TYPE(ent1->data) == IS_UNDEF) {
if (stack->top > 1) {
stack->top--;
} else {
stack->done = 1;
}
efree(ent1);
return;
}
if (!strcmp((char *)name, EL_BINARY)) {
zend_string *new_str = php_base64_decode(
(unsigned char *)Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zval_ptr_dtor(&ent1->data);
ZVAL_STR(&ent1->data, new_str);
}
/* Call __wakeup() method on the object. */
if (Z_TYPE(ent1->data) == IS_OBJECT) {
zval fname, retval;
ZVAL_STRING(&fname, "__wakeup");
call_user_function_ex(NULL, &ent1->data, &fname, &retval, 0, 0, 0, NULL);
zval_ptr_dtor(&fname);
zval_ptr_dtor(&retval);
}
if (stack->top > 1) {
stack->top--;
wddx_stack_top(stack, (void**)&ent2);
/* if non-existent field */
if (ent2->type == ST_FIELD && Z_ISUNDEF(ent2->data)) {
zval_ptr_dtor(&ent1->data);
efree(ent1);
return;
}
if (Z_TYPE(ent2->data) == IS_ARRAY || Z_TYPE(ent2->data) == IS_OBJECT) {
target_hash = HASH_OF(&ent2->data);
if (ent1->varname) {
if (!strcmp(ent1->varname, PHP_CLASS_NAME_VAR) &&
Z_TYPE(ent1->data) == IS_STRING && Z_STRLEN(ent1->data) &&
ent2->type == ST_STRUCT && Z_TYPE(ent2->data) == IS_ARRAY) {
zend_bool incomplete_class = 0;
zend_str_tolower(Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
zend_string_forget_hash_val(Z_STR(ent1->data));
if ((pce = zend_hash_find_ptr(EG(class_table), Z_STR(ent1->data))) == NULL) {
incomplete_class = 1;
pce = PHP_IC_ENTRY;
}
/* Initialize target object */
object_init_ex(&obj, pce);
/* Merge current hashtable with object's default properties */
zend_hash_merge(Z_OBJPROP(obj),
Z_ARRVAL(ent2->data),
zval_add_ref, 0);
if (incomplete_class) {
php_store_class_name(&obj, Z_STRVAL(ent1->data), Z_STRLEN(ent1->data));
}
/* Clean up old array entry */
zval_ptr_dtor(&ent2->data);
/* Set stack entry to point to the newly created object */
ZVAL_COPY_VALUE(&ent2->data, &obj);
/* Clean up class name var entry */
zval_ptr_dtor(&ent1->data);
} else if (Z_TYPE(ent2->data) == IS_OBJECT) {
zend_class_entry *old_scope = EG(scope);
EG(scope) = Z_OBJCE(ent2->data);
add_property_zval(&ent2->data, ent1->varname, &ent1->data);
if Z_REFCOUNTED(ent1->data) Z_DELREF(ent1->data);
EG(scope) = old_scope;
} else {
zend_symtable_str_update(target_hash, ent1->varname, strlen(ent1->varname), &ent1->data);
}
efree(ent1->varname);
} else {
zend_hash_next_index_insert(target_hash, &ent1->data);
}
}
efree(ent1);
} else {
stack->done = 1;
}
} else if (!strcmp((char *)name, EL_VAR) && stack->varname) {
efree(stack->varname);
stack->varname = NULL;
} else if (!strcmp((char *)name, EL_FIELD)) {
st_entry *ent;
wddx_stack_top(stack, (void **)&ent);
efree(ent);
stack->top--;
}
| null | null | 195,801
|
281179289723329197214540489283702639538
| 125
|
Fix for bug #72790 and bug #72799
(cherry picked from commit a14fdb9746262549bbbb96abb87338bacd147e1b)
Conflicts:
ext/wddx/wddx.c
|
other
|
mongo
|
6518b22420c5bbd92c42caf907671c3a2b140bb6
| 1
|
DocumentSource::GetNextResult DocumentSourceUnionWith::doGetNext() {
if (!_pipeline) {
// We must have already been disposed, so we're finished.
return GetNextResult::makeEOF();
}
if (_executionState == ExecutionProgress::kIteratingSource) {
auto nextInput = pSource->getNext();
if (!nextInput.isEOF()) {
return nextInput;
}
_executionState = ExecutionProgress::kStartingSubPipeline;
// All documents from the base collection have been returned, switch to iterating the sub-
// pipeline by falling through below.
}
if (_executionState == ExecutionProgress::kStartingSubPipeline) {
auto serializedPipe = _pipeline->serializeToBson();
LOGV2_DEBUG(23869,
1,
"$unionWith attaching cursor to pipeline {pipeline}",
"pipeline"_attr = serializedPipe);
// $$SEARCH_META can be set during runtime earlier in the pipeline, and therefore must be
// copied to the subpipeline manually.
if (pExpCtx->variables.hasConstantValue(Variables::kSearchMetaId)) {
_pipeline->getContext()->variables.setReservedValue(
Variables::kSearchMetaId,
pExpCtx->variables.getValue(Variables::kSearchMetaId, Document()),
true);
}
try {
_pipeline =
pExpCtx->mongoProcessInterface->attachCursorSourceToPipeline(_pipeline.release());
_executionState = ExecutionProgress::kIteratingSubPipeline;
} catch (const ExceptionFor<ErrorCodes::CommandOnShardedViewNotSupportedOnMongod>& e) {
_pipeline = buildPipelineFromViewDefinition(
pExpCtx,
ExpressionContext::ResolvedNamespace{e->getNamespace(), e->getPipeline()},
serializedPipe);
LOGV2_DEBUG(4556300,
3,
"$unionWith found view definition. ns: {ns}, pipeline: {pipeline}. New "
"$unionWith sub-pipeline: {new_pipe}",
"ns"_attr = e->getNamespace(),
"pipeline"_attr = Value(e->getPipeline()),
"new_pipe"_attr = _pipeline->serializeToBson());
return doGetNext();
}
}
auto res = _pipeline->getNext();
if (res)
return std::move(*res);
// Record the plan summary stats after $unionWith operation is done.
recordPlanSummaryStats(*_pipeline);
_executionState = ExecutionProgress::kFinished;
return GetNextResult::makeEOF();
}
| null | null | 195,804
|
77189124809087292115725641020447994671
| 60
|
SERVER-58203 factor out logging statements into helper functions
|
other
|
lsquic
|
a74702c630e108125e71898398737baec8f02238
| 1
|
lsquic_qeh_settings (struct qpack_enc_hdl *qeh, unsigned max_table_size,
unsigned dyn_table_size, unsigned max_risked_streams, int server)
{
enum lsqpack_enc_opts enc_opts;
assert(qeh->qeh_flags & QEH_INITIALIZED);
if (qeh->qeh_flags & QEH_HAVE_SETTINGS)
{
LSQ_WARN("settings already set");
return -1;
}
enc_opts = LSQPACK_ENC_OPT_STAGE_2
| (server ? LSQPACK_ENC_OPT_SERVER : 0);
qeh->qeh_tsu_sz = sizeof(qeh->qeh_tsu_buf);
if (0 != lsqpack_enc_init(&qeh->qeh_encoder, (void *) qeh->qeh_conn,
max_table_size, dyn_table_size, max_risked_streams, enc_opts,
qeh->qeh_tsu_buf, &qeh->qeh_tsu_sz))
{
LSQ_INFO("could not initialize QPACK encoder");
return -1;
}
LSQ_DEBUG("%zu-byte post-init TSU", qeh->qeh_tsu_sz);
qeh->qeh_flags |= QEH_HAVE_SETTINGS;
qeh->qeh_max_prefix_size =
lsqpack_enc_header_block_prefix_size(&qeh->qeh_encoder);
LSQ_DEBUG("have settings: max table size=%u; dyn table size=%u; max risked "
"streams=%u", max_table_size, dyn_table_size, max_risked_streams);
if (qeh->qeh_enc_sm_out)
qeh_begin_out(qeh);
return 0;
}
| null | null | 196,276
|
288925027585735265622903529553530375426
| 33
|
Release 3.1.0
|
other
|
minetest
|
da71e86633d0b27cd02d7aac9fdac625d141ca13
| 1
|
static inline int checkSettingSecurity(lua_State* L, const std::string &name)
{
if (ScriptApiSecurity::isSecure(L) && name.compare(0, 7, "secure.") == 0)
throw LuaError("Attempt to set secure setting.");
bool is_mainmenu = false;
#ifndef SERVER
is_mainmenu = ModApiBase::getGuiEngine(L) != nullptr;
#endif
if (!is_mainmenu && (name == "mg_name" || name == "mg_flags")) {
errorstream << "Tried to set global setting " << name << ", ignoring. "
"minetest.set_mapgen_setting() should be used instead." << std::endl;
infostream << script_get_backtrace(L) << std::endl;
return -1;
}
return 0;
}
| null | null | 196,670
|
14542184638408056621499358845757545617
| 18
|
Protect a few more settings from being set from mods
Of those settings main_menu_script has concrete security impact, the rest are added out of abundance of caution.
|
other
|
njs
|
81af26364c21c196dd21fb5e14c7fa9ce7debd17
| 1
|
njs_array_convert_to_slow_array(njs_vm_t *vm, njs_array_t *array)
{
uint32_t i, length;
njs_value_t index, value;
njs_object_prop_t *prop;
njs_set_array(&value, array);
array->object.fast_array = 0;
length = array->length;
for (i = 0; i < length; i++) {
if (njs_is_valid(&array->start[i])) {
njs_uint32_to_string(&index, i);
prop = njs_object_property_add(vm, &value, &index, 0);
if (njs_slow_path(prop == NULL)) {
return NJS_ERROR;
}
prop->value = array->start[i];
}
}
/* GC: release value. */
njs_mp_free(vm->mem_pool, array->start);
array->start = NULL;
return NJS_OK;
}
| null | null | 196,817
|
15778839275107817850632511934182345441
| 30
|
Fixed Object.defineProperty() when a recursive descriptor is provided.
This closes #481 issue on Github.
|
other
|
furnace
|
0eb02422d5161767e9983bdaa5c429762d3477ce
| 1
|
inline void FurnaceGUI::patternRow(int i, bool isPlaying, float lineHeight, int chans, int ord, const DivPattern** patCache) {
static char id[32];
bool selectedRow=(i>=sel1.y && i<=sel2.y);
ImGui::TableNextRow(0,lineHeight);
ImGui::TableNextColumn();
float cursorPosY=ImGui::GetCursorPos().y-ImGui::GetScrollY();
// check if the row is visible
if (cursorPosY<-lineHeight || cursorPosY>ImGui::GetWindowSize().y) {
return;
}
// check if we are in range
if (ord<0 || ord>=e->song.ordersLen) {
return;
}
if (i<0 || i>=e->song.patLen) {
return;
}
bool isPushing=false;
ImVec4 activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE];
ImVec4 inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE];
ImVec4 rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX];
if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI2];
inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI2];
rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI2];
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
activeColor=uiColors[GUI_COLOR_PATTERN_ACTIVE_HI1];
inactiveColor=uiColors[GUI_COLOR_PATTERN_INACTIVE_HI1];
rowIndexColor=uiColors[GUI_COLOR_PATTERN_ROW_INDEX_HI1];
}
// check overflow highlight
if (settings.overflowHighlight) {
if (edit && cursor.y==i) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING]));
} else if (isPlaying && oldRow==i) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD]));
} else if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2]));
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
ImGui::TableSetBgColor(ImGuiTableBgTarget_RowBg0,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1]));
}
} else {
isPushing=true;
if (edit && cursor.y==i) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_EDITING]));
} else if (isPlaying && oldRow==i) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_PLAY_HEAD]));
} else if (e->song.hilightB>0 && !(i%e->song.hilightB)) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_2]));
} else if (e->song.hilightA>0 && !(i%e->song.hilightA)) {
ImGui::PushStyleColor(ImGuiCol_Header,ImGui::GetColorU32(uiColors[GUI_COLOR_PATTERN_HI_1]));
} else {
isPushing=false;
}
}
// row number
if (settings.patRowsBase==1) {
ImGui::TextColored(rowIndexColor," %.2X ",i);
} else {
ImGui::TextColored(rowIndexColor,"%3d ",i);
}
// for each column
for (int j=0; j<chans; j++) {
// check if channel is not hidden
if (!e->song.chanShow[j]) {
patChanX[j]=ImGui::GetCursorPosX();
continue;
}
int chanVolMax=e->getMaxVolumeChan(j);
if (chanVolMax<1) chanVolMax=1;
const DivPattern* pat=patCache[j];
ImGui::TableNextColumn();
patChanX[j]=ImGui::GetCursorPosX();
// selection highlight flags
int sel1XSum=sel1.xCoarse*32+sel1.xFine;
int sel2XSum=sel2.xCoarse*32+sel2.xFine;
int j32=j*32;
bool selectedNote=selectedRow && (j32>=sel1XSum && j32<=sel2XSum);
bool selectedIns=selectedRow && (j32+1>=sel1XSum && j32+1<=sel2XSum);
bool selectedVol=selectedRow && (j32+2>=sel1XSum && j32+2<=sel2XSum);
bool cursorNote=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==0);
bool cursorIns=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==1);
bool cursorVol=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==2);
// note
sprintf(id,"%s##PN_%d_%d",noteName(pat->data[i][0],pat->data[i][1]),i,j);
if (pat->data[i][0]==0 && pat->data[i][1]==0) {
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,activeColor);
}
if (cursorNote) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedNote) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedNote,ImGuiSelectableFlags_NoPadWithHalfSpacing,threeChars);
if (selectedNote) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,0,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,0,i);
}
ImGui::PopStyleColor();
// the following is only visible when the channel is not collapsed
if (!e->song.chanCollapse[j]) {
// instrument
if (pat->data[i][2]==-1) {
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
sprintf(id,"..##PI_%d_%d",i,j);
} else {
if (pat->data[i][2]<0 || pat->data[i][2]>=e->song.insLen) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_ERROR]);
} else {
DivInstrumentType t=e->song.ins[pat->data[i][2]]->type;
if (t!=DIV_INS_AMIGA && t!=e->getPreferInsType(j)) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS_WARN]);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_INS]);
}
}
sprintf(id,"%.2X##PI_%d_%d",pat->data[i][2],i,j);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorIns) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedIns) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedIns,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedIns) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,1,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,1,i);
}
ImGui::PopStyleColor();
// volume
if (pat->data[i][3]==-1) {
sprintf(id,"..##PV_%d_%d",i,j);
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
int volColor=(pat->data[i][3]*127)/chanVolMax;
if (volColor>127) volColor=127;
if (volColor<0) volColor=0;
sprintf(id,"%.2X##PV_%d_%d",pat->data[i][3],i,j);
ImGui::PushStyleColor(ImGuiCol_Text,volColors[volColor]);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorVol) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedVol) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedVol,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedVol) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,2,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,2,i);
}
ImGui::PopStyleColor();
// effects
for (int k=0; k<e->song.pat[j].effectRows; k++) {
int index=4+(k<<1);
bool selectedEffect=selectedRow && (j32+index-1>=sel1XSum && j32+index-1<=sel2XSum);
bool selectedEffectVal=selectedRow && (j32+index>=sel1XSum && j32+index<=sel2XSum);
bool cursorEffect=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index-1);
bool cursorEffectVal=(cursor.y==i && cursor.xCoarse==j && cursor.xFine==index);
// effect
if (pat->data[i][index]==-1) {
sprintf(id,"..##PE%d_%d_%d",k,i,j);
ImGui::PushStyleColor(ImGuiCol_Text,inactiveColor);
} else {
sprintf(id,"%.2X##PE%d_%d_%d",pat->data[i][index],k,i,j);
if (pat->data[i][index]<0x10) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[fxColors[pat->data[i][index]]]);
} else if (pat->data[i][index]<0x20) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]);
} else if (pat->data[i][index]<0x30) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_SECONDARY]);
} else if (pat->data[i][index]<0x48) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SYS_PRIMARY]);
} else if (pat->data[i][index]<0x90) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else if (pat->data[i][index]<0xa0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_MISC]);
} else if (pat->data[i][index]<0xc0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else if (pat->data[i][index]<0xd0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_SPEED]);
} else if (pat->data[i][index]<0xe0) {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[GUI_COLOR_PATTERN_EFFECT_INVALID]);
} else {
ImGui::PushStyleColor(ImGuiCol_Text,uiColors[extFxColors[pat->data[i][index]-0xe0]]);
}
}
ImGui::SameLine(0.0f,0.0f);
if (cursorEffect) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedEffect) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedEffect,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedEffect) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,index-1,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,index-1,i);
}
// effect value
if (pat->data[i][index+1]==-1) {
sprintf(id,"..##PF%d_%d_%d",k,i,j);
} else {
sprintf(id,"%.2X##PF%d_%d_%d",pat->data[i][index+1],k,i,j);
}
ImGui::SameLine(0.0f,0.0f);
if (cursorEffectVal) {
ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_CURSOR]);
ImGui::PushStyleColor(ImGuiCol_HeaderActive,uiColors[GUI_COLOR_PATTERN_CURSOR_ACTIVE]);
ImGui::PushStyleColor(ImGuiCol_HeaderHovered,uiColors[GUI_COLOR_PATTERN_CURSOR_HOVER]);
ImGui::Selectable(id,true,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
demandX=ImGui::GetCursorPosX();
ImGui::PopStyleColor(3);
} else {
if (selectedEffectVal) ImGui::PushStyleColor(ImGuiCol_Header,uiColors[GUI_COLOR_PATTERN_SELECTION]);
ImGui::Selectable(id,isPushing || selectedEffectVal,ImGuiSelectableFlags_NoPadWithHalfSpacing,twoChars);
if (selectedEffectVal) ImGui::PopStyleColor();
}
if (ImGui::IsItemClicked()) {
startSelection(j,index,i);
}
if (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenBlockedByActiveItem)) {
updateSelection(j,index,i);
}
ImGui::PopStyleColor();
}
}
}
if (isPushing) {
ImGui::PopStyleColor();
}
ImGui::TableNextColumn();
patChanX[chans]=ImGui::GetCursorPosX();
}
| null | null | 196,841
|
136686899264564982027774933242322169613
| 275
|
fix possible pattern crash
issue #325
|
other
|
libjxl
|
7dfa400ded53919d986c5d3d23446a09e0cf481b
| 1
|
Status DecodeImageAPNG(Span<const uint8_t> bytes, ThreadPool* pool,
CodecInOut* io) {
Reader r;
unsigned int id, i, j, w, h, w0, h0, x0, y0;
unsigned int delay_num, delay_den, dop, bop, rowbytes, imagesize;
unsigned char sig[8];
png_structp png_ptr;
png_infop info_ptr;
CHUNK chunk;
CHUNK chunkIHDR;
std::vector<CHUNK> chunksInfo;
bool isAnimated = false;
bool skipFirst = false;
bool hasInfo = false;
bool all_dispose_bg = true;
APNGFrame frameRaw = {};
r = {bytes.data(), bytes.data() + bytes.size()};
// Not an aPNG => not an error
unsigned char png_signature[8] = {137, 80, 78, 71, 13, 10, 26, 10};
if (r.Read(sig, 8) || memcmp(sig, png_signature, 8) != 0) {
return false;
}
id = read_chunk(&r, &chunkIHDR);
io->frames.clear();
io->dec_pixels = 0;
io->metadata.m.SetUintSamples(8);
io->metadata.m.SetAlphaBits(8);
io->metadata.m.color_encoding =
ColorEncoding::SRGB(); // todo: get data from png metadata
(void)io->dec_hints.Foreach(
[](const std::string& key, const std::string& /*value*/) {
JXL_WARNING("APNG decoder ignoring %s hint", key.c_str());
return true;
});
bool errorstate = true;
if (id == kId_IHDR && chunkIHDR.size == 25) {
w0 = w = png_get_uint_32(chunkIHDR.p + 8);
h0 = h = png_get_uint_32(chunkIHDR.p + 12);
if (w > cMaxPNGSize || h > cMaxPNGSize) {
return false;
}
x0 = 0;
y0 = 0;
delay_num = 1;
delay_den = 10;
dop = 0;
bop = 0;
rowbytes = w * 4;
imagesize = h * rowbytes;
frameRaw.p = new unsigned char[imagesize];
frameRaw.rows = new png_bytep[h * sizeof(png_bytep)];
for (j = 0; j < h; j++) frameRaw.rows[j] = frameRaw.p + j * rowbytes;
if (!processing_start(png_ptr, info_ptr, (void*)&frameRaw, hasInfo,
chunkIHDR, chunksInfo)) {
bool last_base_was_none = true;
while (!r.Eof()) {
id = read_chunk(&r, &chunk);
if (!id) break;
JXL_ASSERT(chunk.p != nullptr);
if (id == kId_acTL && !hasInfo && !isAnimated) {
isAnimated = true;
skipFirst = true;
io->metadata.m.have_animation = true;
io->metadata.m.animation.tps_numerator = 1000;
} else if (id == kId_IEND ||
(id == kId_fcTL && (!hasInfo || isAnimated))) {
if (hasInfo) {
if (!processing_finish(png_ptr, info_ptr)) {
ImageBundle bundle(&io->metadata.m);
bundle.duration = delay_num * 1000 / delay_den;
bundle.origin.x0 = x0;
bundle.origin.y0 = y0;
// TODO(veluca): this could in principle be implemented.
if (last_base_was_none && !all_dispose_bg &&
(x0 != 0 || y0 != 0 || w0 != w || h0 != h || bop != 0)) {
return JXL_FAILURE(
"APNG with dispose-to-0 is not supported for non-full or "
"blended frames");
}
switch (dop) {
case 0:
bundle.use_for_next_frame = true;
last_base_was_none = false;
all_dispose_bg = false;
break;
case 2:
bundle.use_for_next_frame = false;
all_dispose_bg = false;
break;
default:
bundle.use_for_next_frame = false;
last_base_was_none = true;
}
bundle.blend = bop != 0;
io->dec_pixels += w0 * h0;
Image3F sub_frame(w0, h0);
ImageF sub_frame_alpha(w0, h0);
for (size_t y = 0; y < h0; ++y) {
float* const JXL_RESTRICT row_r = sub_frame.PlaneRow(0, y);
float* const JXL_RESTRICT row_g = sub_frame.PlaneRow(1, y);
float* const JXL_RESTRICT row_b = sub_frame.PlaneRow(2, y);
float* const JXL_RESTRICT row_alpha = sub_frame_alpha.Row(y);
uint8_t* const f = frameRaw.rows[y];
for (size_t x = 0; x < w0; ++x) {
if (f[4 * x + 3] == 0) {
row_alpha[x] = 0;
row_r[x] = 0;
row_g[x] = 0;
row_b[x] = 0;
continue;
}
row_r[x] = f[4 * x + 0] * (1.f / 255);
row_g[x] = f[4 * x + 1] * (1.f / 255);
row_b[x] = f[4 * x + 2] * (1.f / 255);
row_alpha[x] = f[4 * x + 3] * (1.f / 255);
}
}
bundle.SetFromImage(std::move(sub_frame), ColorEncoding::SRGB());
bundle.SetAlpha(std::move(sub_frame_alpha),
/*alpha_is_premultiplied=*/false);
io->frames.push_back(std::move(bundle));
} else {
delete[] chunk.p;
break;
}
}
if (id == kId_IEND) {
errorstate = false;
break;
}
// At this point the old frame is done. Let's start a new one.
w0 = png_get_uint_32(chunk.p + 12);
h0 = png_get_uint_32(chunk.p + 16);
x0 = png_get_uint_32(chunk.p + 20);
y0 = png_get_uint_32(chunk.p + 24);
delay_num = png_get_uint_16(chunk.p + 28);
delay_den = png_get_uint_16(chunk.p + 30);
dop = chunk.p[32];
bop = chunk.p[33];
if (w0 > cMaxPNGSize || h0 > cMaxPNGSize || x0 > cMaxPNGSize ||
y0 > cMaxPNGSize || x0 + w0 > w || y0 + h0 > h || dop > 2 ||
bop > 1) {
delete[] chunk.p;
break;
}
if (hasInfo) {
memcpy(chunkIHDR.p + 8, chunk.p + 12, 8);
if (processing_start(png_ptr, info_ptr, (void*)&frameRaw, hasInfo,
chunkIHDR, chunksInfo)) {
delete[] chunk.p;
break;
}
} else
skipFirst = false;
if (io->frames.size() == (skipFirst ? 1 : 0)) {
bop = 0;
if (dop == 2) dop = 1;
}
} else if (id == kId_IDAT) {
hasInfo = true;
if (processing_data(png_ptr, info_ptr, chunk.p, chunk.size)) {
delete[] chunk.p;
break;
}
} else if (id == kId_fdAT && isAnimated) {
png_save_uint_32(chunk.p + 4, chunk.size - 16);
memcpy(chunk.p + 8, "IDAT", 4);
if (processing_data(png_ptr, info_ptr, chunk.p + 4, chunk.size - 4)) {
delete[] chunk.p;
break;
}
} else if (!isAbc(chunk.p[4]) || !isAbc(chunk.p[5]) ||
!isAbc(chunk.p[6]) || !isAbc(chunk.p[7])) {
delete[] chunk.p;
break;
} else if (!hasInfo) {
if (processing_data(png_ptr, info_ptr, chunk.p, chunk.size)) {
delete[] chunk.p;
break;
}
chunksInfo.push_back(chunk);
continue;
}
delete[] chunk.p;
}
}
delete[] frameRaw.rows;
delete[] frameRaw.p;
}
for (i = 0; i < chunksInfo.size(); i++) delete[] chunksInfo[i].p;
chunksInfo.clear();
delete[] chunkIHDR.p;
if (errorstate) return false;
SetIntensityTarget(io);
return true;
}
| null | null | 196,993
|
123816616739143632951179511270786230002
| 212
|
Fix handling of APNG with 0 delay_den (#313)
|
other
|
drogon
|
3c785326c63a34aa1799a639ae185bc9453cb447
| 1
|
int HttpFileImpl::save(const std::string &path) const
{
assert(!path.empty());
if (fileName_.empty())
return -1;
filesystem::path fsPath(utils::toNativePath(path));
if (!fsPath.is_absolute() &&
(!fsPath.has_parent_path() ||
(fsPath.begin()->string() != "." && fsPath.begin()->string() != "..")))
{
filesystem::path fsUploadPath(utils::toNativePath(
HttpAppFrameworkImpl::instance().getUploadPath()));
fsPath = fsUploadPath / fsPath;
}
filesystem::path fsFileName(utils::toNativePath(fileName_));
if (!filesystem::exists(fsPath))
{
LOG_TRACE << "create path:" << fsPath;
drogon::error_code err;
filesystem::create_directories(fsPath, err);
if (err)
{
LOG_SYSERR;
return -1;
}
}
return saveTo(fsPath / fsFileName);
}
| null | null | 197,057
|
300389952786254526295960796960160809202
| 28
|
Prevent malformed upload path causing arbitrary write (#1174)
|
other
|
tensorflow
|
bc9c546ce7015c57c2f15c168b3d9201de679a1d
| 1
|
void Compute(OpKernelContext* c) override {
core::RefCountPtr<Var> v;
OP_REQUIRES_OK(c, LookupResource(c, HandleFromInput(c, 0), &v));
OP_REQUIRES_OK(c, EnsureSparseVariableAccess<Device, T>(c, v.get()));
// NOTE: We hold the lock for the whole gather operation instead
// of increasing the reference count of v->tensor() to avoid a
// situation where a write to the same variable will see a
// reference count greater than one and make a copy of the
// (potentially very large) tensor buffer.
tf_shared_lock ml(*v->mu());
const Tensor& params = *v->tensor();
const Tensor& indices = c->input(1);
OP_REQUIRES(
c, TensorShapeUtils::IsVectorOrHigher(params.shape()),
errors::InvalidArgument("params must be at least 1 dimensional"));
// Check that we have enough index space
const int64_t N = indices.NumElements();
OP_REQUIRES(
c, params.dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params.dim_size(0), " > ",
std::numeric_limits<Index>::max()));
// The result shape is params.shape[:batch_dims] +
// indices.shape[batch_dims:] + params.shape[batch_dims+1:].
TensorShape result_shape;
for (int i = 0; i < batch_dims_; ++i) {
result_shape.AddDim(params.dim_size(i));
}
for (int i = batch_dims_; i < indices.dims(); ++i) {
result_shape.AddDim(indices.dim_size(i));
}
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
result_shape.AddDim(params.dim_size(i));
}
Tensor* out = nullptr;
Tensor tmp;
if (params.dtype() == DT_VARIANT) {
tmp = Tensor(DT_VARIANT, result_shape);
c->set_output(0, tmp);
out = &tmp;
} else {
OP_REQUIRES_OK(c, c->allocate_output(0, result_shape, &out));
}
if (N > 0) {
Tensor tmp_indices;
// Points to the original or updated (if batch_dims is set) indices.
const Tensor* op_indices = &indices;
if (batch_dims_ > 0) {
OP_REQUIRES_OK(c, c->allocate_temp(indices.dtype(), indices.shape(),
&tmp_indices));
functor::DenseUpdate<Device, Index, ASSIGN> copy_functor;
copy_functor(c->eigen_device<Device>(), tmp_indices.flat<Index>(),
indices.flat<Index>());
AddBatchOffsets(&tmp_indices, params);
op_indices = &tmp_indices;
}
int64_t gather_dim_size = 1;
for (int idx = 0; idx <= batch_dims_; ++idx) {
gather_dim_size *= params.dim_size(idx);
}
int64_t inner_size = 1;
for (int i = batch_dims_ + 1; i < params.dims(); ++i) {
inner_size *= params.dim_size(i);
}
auto params_flat = params.shaped<T, 3>({1, gather_dim_size, inner_size});
const auto indices_flat = op_indices->flat<Index>();
auto out_flat = out->shaped<T, 3>({1, N, out->NumElements() / N});
functor::GatherFunctor<Device, T, Index> functor;
int64_t bad_i = functor(c, params_flat, indices_flat, out_flat);
OP_REQUIRES(
c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i), " = ",
indices_flat(bad_i), " is not in [0, ", params.dim_size(0), ")"));
}
}
| null | null | 197,110
|
294361236653337986849576392701719119932
| 86
|
Prevent heap oob access in `resource_variable_ops.cc`
PiperOrigin-RevId: 387936433
Change-Id: I9e71ddaa8dbd51ec6afbf163a6b3b591f193b4f6
|
other
|
tinyexr
|
a685e3332f61cd4e59324bf3f669d36973d64270
| 1
|
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
const std::vector<tinyexr::tinyexr_uint64> &offsets,
const unsigned char *head, const size_t size,
std::string *err) {
int num_channels = exr_header->num_channels;
int num_scanline_blocks = 1;
if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
num_scanline_blocks = 16;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
num_scanline_blocks = 32;
} else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
num_scanline_blocks = 16;
}
int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
if ((data_width < 0) || (data_height < 0)) {
if (err) {
std::stringstream ss;
ss << "Invalid data width or data height: " << data_width << ", "
<< data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Do not allow too large data_width and data_height. header invalid?
{
const int threshold = 1024 * 8192; // heuristics
if ((data_width > threshold) || (data_height > threshold)) {
if (err) {
std::stringstream ss;
ss << "data_with or data_height too large. data_width: " << data_width
<< ", "
<< "data_height = " << data_height << std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
}
size_t num_blocks = offsets.size();
std::vector<size_t> channel_offset_list;
int pixel_data_size = 0;
size_t channel_offset = 0;
if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
&channel_offset, num_channels,
exr_header->channels)) {
if (err) {
(*err) += "Failed to compute channel layout.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety.
if (exr_header->tiled) {
// value check
if (exr_header->tile_size_x < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
if (exr_header->tile_size_y < 0) {
if (err) {
std::stringstream ss;
ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n";
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_HEADER;
}
size_t num_tiles = offsets.size(); // = # of blocks
exr_image->tiles = static_cast<EXRTile *>(
calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));
for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
// Allocate memory for each tile.
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
exr_header->tile_size_x, exr_header->tile_size_y);
// 16 byte: tile coordinates
// 4 byte : data size
// ~ : data(uncompressed or compressed)
if (offsets[tile_idx] + sizeof(int) * 5 > size) {
if (err) {
(*err) += "Insufficient data size.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]);
int tile_coordinates[4];
memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));
// @todo{ LoD }
if (tile_coordinates[2] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
if (tile_coordinates[3] != 0) {
return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
}
int data_len;
memcpy(&data_len, data_ptr + 16,
sizeof(int)); // 16 = sizeof(tile_coordinates)
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (data_len < 4 || size_t(data_len) > data_size) {
if (err) {
(*err) += "Insufficient data length.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Move to data addr: 20 = 16 + 4;
data_ptr += 20;
tinyexr::DecodeTiledPixelData(
exr_image->tiles[tile_idx].images,
&(exr_image->tiles[tile_idx].width),
&(exr_image->tiles[tile_idx].height),
exr_header->requested_pixel_types, data_ptr,
static_cast<size_t>(data_len), exr_header->compression_type,
exr_header->line_order, data_width, data_height, tile_coordinates[0],
tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels), exr_header->channels,
channel_offset_list);
exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
exr_image->tiles[tile_idx].level_y = tile_coordinates[3];
exr_image->num_tiles = static_cast<int>(num_tiles);
}
} else { // scanline format
// Don't allow too large image(256GB * pixel_data_size or more). Workaround
// for #104.
size_t total_data_len =
size_t(data_width) * size_t(data_height) * size_t(num_channels);
const bool total_data_len_overflown = sizeof(void*) == 8 ? (total_data_len >= 0x4000000000) : false;
if ((total_data_len == 0) || total_data_len_overflown ) {
if (err) {
std::stringstream ss;
ss << "Image data size is zero or too large: width = " << data_width
<< ", height = " << data_height << ", channels = " << num_channels
<< std::endl;
(*err) += ss.str();
}
return TINYEXR_ERROR_INVALID_DATA;
}
exr_image->images = tinyexr::AllocateImage(
num_channels, exr_header->channels, exr_header->requested_pixel_types,
data_width, data_height);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int y = 0; y < static_cast<int>(num_blocks); y++) {
size_t y_idx = static_cast<size_t>(y);
if (offsets[y_idx] + sizeof(int) * 2 > size) {
invalid_data = true;
} else {
// 4 byte: scan line
// 4 byte: data size
// ~ : pixel data(uncompressed or compressed)
size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2));
const unsigned char *data_ptr =
reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);
int line_no;
memcpy(&line_no, data_ptr, sizeof(int));
int data_len;
memcpy(&data_len, data_ptr + 4, sizeof(int));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
if (size_t(data_len) > data_size) {
invalid_data = true;
} else if (data_len == 0) {
// TODO(syoyo): May be ok to raise the threshold for example `data_len
// < 4`
invalid_data = true;
} else {
// line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks,
(exr_header->data_window[3] + 1));
int num_lines = end_line_no - line_no;
if (num_lines <= 0) {
invalid_data = true;
} else {
// Move to data addr: 8 = 4 + 4;
data_ptr += 8;
// Adjust line_no with data_window.bmin.y
// overflow check
tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]);
if (lno > std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else if (lno < -std::numeric_limits<int>::max()) {
line_no = -1; // invalid
} else {
line_no -= exr_header->data_window[1];
}
if (line_no < 0) {
invalid_data = true;
} else {
if (!tinyexr::DecodePixelData(
exr_image->images, exr_header->requested_pixel_types,
data_ptr, static_cast<size_t>(data_len),
exr_header->compression_type, exr_header->line_order,
data_width, data_height, data_width, y, line_no,
num_lines, static_cast<size_t>(pixel_data_size),
static_cast<size_t>(exr_header->num_custom_attributes),
exr_header->custom_attributes,
static_cast<size_t>(exr_header->num_channels),
exr_header->channels, channel_offset_list)) {
invalid_data = true;
}
}
}
}
}
} // omp parallel
}
if (invalid_data) {
if (err) {
std::stringstream ss;
(*err) += "Invalid data found when decoding pixels.\n";
}
return TINYEXR_ERROR_INVALID_DATA;
}
// Overwrite `pixel_type` with `requested_pixel_type`.
{
for (int c = 0; c < exr_header->num_channels; c++) {
exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
}
}
{
exr_image->num_channels = num_channels;
exr_image->width = data_width;
exr_image->height = data_height;
}
return TINYEXR_SUCCESS;
}
| null | null | 197,111
|
270881477601119047421793150845336761803
| 277
|
Make line_no with too large value(2**20) invalid. Fixes #124
|
other
|
mongo
|
07b8851825836911265e909d6842d4586832f9bb
| 1
|
DocumentSource::GetNextResult DocumentSourceGroup::initialize() {
const size_t numAccumulators = _accumulatedFields.size();
// Barring any pausing, this loop exhausts 'pSource' and populates '_groups'.
GetNextResult input = pSource->getNext();
for (; input.isAdvanced(); input = pSource->getNext()) {
if (_memoryTracker.shouldSpillWithAttemptToSaveMemory([this]() { return freeMemory(); })) {
_sortedFiles.push_back(spill());
}
// We release the result document here so that it does not outlive the end of this loop
// iteration. Not releasing could lead to an array copy when this group follows an unwind.
auto rootDocument = input.releaseDocument();
Value id = computeId(rootDocument);
// Look for the _id value in the map. If it's not there, add a new entry with a blank
// accumulator. This is done in a somewhat odd way in order to avoid hashing 'id' and
// looking it up in '_groups' multiple times.
const size_t oldSize = _groups->size();
vector<intrusive_ptr<AccumulatorState>>& group = (*_groups)[id];
const bool inserted = _groups->size() != oldSize;
if (inserted) {
_memoryTracker.memoryUsageBytes += id.getApproximateSize();
// Initialize and add the accumulators
Value expandedId = expandId(id);
Document idDoc =
expandedId.getType() == BSONType::Object ? expandedId.getDocument() : Document();
group.reserve(numAccumulators);
for (auto&& accumulatedField : _accumulatedFields) {
auto accum = accumulatedField.makeAccumulator();
Value initializerValue =
accumulatedField.expr.initializer->evaluate(idDoc, &pExpCtx->variables);
accum->startNewGroup(initializerValue);
group.push_back(accum);
}
} else {
for (auto&& groupObj : group) {
// subtract old mem usage. New usage added back after processing.
_memoryTracker.memoryUsageBytes -= groupObj->memUsageForSorter();
}
}
/* tickle all the accumulators for the group we found */
dassert(numAccumulators == group.size());
for (size_t i = 0; i < numAccumulators; i++) {
group[i]->process(
_accumulatedFields[i].expr.argument->evaluate(rootDocument, &pExpCtx->variables),
_doingMerge);
_memoryTracker.memoryUsageBytes += group[i]->memUsageForSorter();
}
if (kDebugBuild && !storageGlobalParams.readOnly) {
// In debug mode, spill every time we have a duplicate id to stress merge logic.
if (!inserted && // is a dup
!pExpCtx->inMongos && // can't spill to disk in mongos
!_memoryTracker.allowDiskUse && // don't change behavior when testing external sort
_sortedFiles.size() < 20) { // don't open too many FDs
_sortedFiles.push_back(spill());
}
}
}
switch (input.getStatus()) {
case DocumentSource::GetNextResult::ReturnStatus::kAdvanced: {
MONGO_UNREACHABLE; // We consumed all advances above.
}
case DocumentSource::GetNextResult::ReturnStatus::kPauseExecution: {
return input; // Propagate pause.
}
case DocumentSource::GetNextResult::ReturnStatus::kEOF: {
// Do any final steps necessary to prepare to output results.
if (!_sortedFiles.empty()) {
_spilled = true;
if (!_groups->empty()) {
_sortedFiles.push_back(spill());
}
// We won't be using groups again so free its memory.
_groups = pExpCtx->getValueComparator().makeUnorderedValueMap<Accumulators>();
_sorterIterator.reset(Sorter<Value, Value>::Iterator::merge(
_sortedFiles, SortOptions(), SorterComparator(pExpCtx->getValueComparator())));
// prepare current to accumulate data
_currentAccumulators.reserve(numAccumulators);
for (auto&& accumulatedField : _accumulatedFields) {
_currentAccumulators.push_back(accumulatedField.makeAccumulator());
}
verify(_sorterIterator->more()); // we put data in, we should get something out.
_firstPartOfNextGroup = _sorterIterator->next();
} else {
// start the group iterator
groupsIterator = _groups->begin();
}
// This must happen last so that, unless control gets here, we will re-enter
// initialization after getting a GetNextResult::ResultState::kPauseExecution.
_initialized = true;
return input;
}
}
MONGO_UNREACHABLE;
}
| null | null | 197,179
|
299639630867010953869050856327872645652
| 110
|
SERVER-60218-44: SERVER-60218 add initialize helper function for document_source_group (cherry picked from commit 867f52afbb79bc00e35c70f8e0681b7d602f97b2)
|
other
|
gpac
|
dc7de8d3d604426c7a6e628d90cb9fb88e7b4c2c
| 1
|
GF_Err BD_DecMFFieldVec(GF_BifsDecoder * codec, GF_BitStream *bs, GF_Node *node, GF_FieldInfo *field, Bool is_mem_com)
{
GF_Err e;
u32 NbBits, nbFields;
u32 i;
GF_ChildNodeItem *last;
u8 qp_local, qp_on, initial_qp;
GF_FieldInfo sffield;
memset(&sffield, 0, sizeof(GF_FieldInfo));
sffield.fieldIndex = field->fieldIndex;
sffield.fieldType = gf_sg_vrml_get_sf_type(field->fieldType);
sffield.NDTtype = field->NDTtype;
sffield.name = field->name;
initial_qp = qp_local = qp_on = 0;
//vector description - alloc the MF size before
NbBits = gf_bs_read_int(bs, 5);
nbFields = gf_bs_read_int(bs, NbBits);
if (codec->ActiveQP) {
initial_qp = 1;
/*this is for QP 14*/
gf_bifs_dec_qp14_set_length(codec, nbFields);
}
if (field->fieldType != GF_SG_VRML_MFNODE) {
e = gf_sg_vrml_mf_alloc(field->far_ptr, field->fieldType, nbFields);
if (e) return e;
for (i=0; i<nbFields; i++) {
e = gf_sg_vrml_mf_get_item(field->far_ptr, field->fieldType, & sffield.far_ptr, i);
if (e) return e;
e = gf_bifs_dec_sf_field(codec, bs, node, &sffield, GF_FALSE);
if (e) return e;
}
} else {
last = NULL;
for (i=0; i<nbFields; i++) {
GF_Node *new_node = gf_bifs_dec_node(codec, bs, field->NDTtype);
if (new_node) {
e = gf_node_register(new_node, is_mem_com ? NULL : node);
if (e) return e;
if (node) {
/*special case for QP, register as the current QP*/
if (gf_node_get_tag(new_node) == TAG_MPEG4_QuantizationParameter) {
qp_local = ((M_QuantizationParameter *)new_node)->isLocal;
/*we have a QP in the same scope, remove previous
NB: we assume this is the right behavior, the spec doesn't say
whether QP is cumulative or not*/
if (qp_on) gf_bifs_dec_qp_remove(codec, GF_FALSE);
e = gf_bifs_dec_qp_set(codec, new_node);
if (e) return e;
qp_on = 1;
if (qp_local) qp_local = 2;
if (codec->force_keep_qp) {
e = gf_node_list_add_child_last(field->far_ptr, new_node, &last);
if (e) return e;
} else {
gf_node_register(new_node, NULL);
gf_node_unregister(new_node, node);
}
} else {
e = gf_node_list_add_child_last(field->far_ptr, new_node, &last);
if (e) return e;
}
}
/*proto coding*/
else if (codec->pCurrentProto) {
/*TO DO: what happens if this is a QP node on the interface ?*/
e = gf_node_list_add_child_last( (GF_ChildNodeItem **)field->far_ptr, new_node, &last);
if (e) return e;
}
} else {
return codec->LastError ? codec->LastError : GF_NON_COMPLIANT_BITSTREAM;
}
}
/*according to the spec, the QP applies to the current node itself, not just children.
If IsLocal is TRUE remove the node*/
if (qp_on && qp_local) {
if (qp_local == 2) {
// qp_local = 1;
} else {
//ask to get rid of QP and reactivate if we had a QP when entering the node
gf_bifs_dec_qp_remove(codec, initial_qp);
// qp_local = 0;
}
}
}
/*finally delete the QP if any (local or not) as we get out of this node*/
if (qp_on) gf_bifs_dec_qp_remove(codec, GF_TRUE);
return GF_OK;
}
| null | null | 197,499
|
299257728605197431750731122978204720459
| 96
|
fixed #2212
|
other
|
radare2
|
fc285cecb8469f0262db0170bf6dd7c01d9b8ed5
| 1
|
static RList *oneshotall_buffer(RBin *bin, RBuffer *b) {
RList *list = r_list_newf (free);
RBinXtrData *meta = get_the_meta (bin, b);
r_list_append (list, meta);
return list;
}
| null | null | 197,579
|
140388220319988183510409398909926632593
| 6
|
Fix #20354
|
other
|
tensorflow
|
be7a4de6adfbd303ce08be4332554dff70362612
| 1
|
void Compute(OpKernelContext* context) override {
// Read ragged_splits inputs.
OpInputList ragged_nested_splits_in;
OP_REQUIRES_OK(context, context->input_list("rt_nested_splits",
&ragged_nested_splits_in));
const int ragged_nested_splits_len = ragged_nested_splits_in.size();
RaggedTensorVariant batched_ragged_input;
// Read ragged_values input.
batched_ragged_input.set_values(context->input(ragged_nested_splits_len));
batched_ragged_input.mutable_nested_splits()->reserve(
ragged_nested_splits_len);
for (int i = 0; i < ragged_nested_splits_len; i++) {
batched_ragged_input.append_splits(ragged_nested_splits_in[i]);
}
if (!batched_input_) {
// Encode as a Scalar Variant Tensor.
Tensor* encoded_scalar;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({}),
&encoded_scalar));
encoded_scalar->scalar<Variant>()() = std::move(batched_ragged_input);
return;
}
// Unbatch the Ragged Tensor and encode the components.
std::vector<RaggedTensorVariant> unbatched_ragged_input;
auto batched_splits_top_vec =
batched_ragged_input.splits(0).vec<SPLIT_TYPE>();
int num_components = batched_splits_top_vec.size() - 1;
OP_REQUIRES(context, num_components >= 0,
errors::Internal("Invalid split argument."));
OP_REQUIRES_OK(context, UnbatchRaggedZerothDim<VALUE_TYPE, SPLIT_TYPE>(
batched_ragged_input, &unbatched_ragged_input));
// Bundle the encoded scalar Variant Tensors into a rank-1 Variant Tensor.
Tensor* encoded_vector;
int output_size = unbatched_ragged_input.size();
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({output_size}),
&encoded_vector));
auto encoded_vector_t = encoded_vector->vec<Variant>();
for (int i = 0; i < output_size; i++) {
encoded_vector_t(i) = unbatched_ragged_input[i];
}
}
| null | null | 197,719
|
209500931068648342443345704454092620756
| 45
|
Ensure non-empty rt_nested_splits in tf.raw_ops.RaggedTensorToVariant
PiperOrigin-RevId: 387664237
Change-Id: Ia1700c34b5610873d63561abc86e23b46ead93b3
|
other
|
tensorflow
|
c79ba87153ee343401dbe9d1954d7f79e521eb14
| 1
|
Status TransposeShapeFn(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle perm_shape = c->input(1);
const Tensor* perm = c->input_tensor(1);
DimensionHandle perm_elems = c->NumElements(perm_shape);
// If we don't have rank information on the input or value information on
// perm we can't return any shape information, otherwise we have enough
// information to at least find the rank of the output.
if (!c->RankKnown(input) && !c->ValueKnown(perm_elems) && perm == nullptr) {
c->set_output(0, c->UnknownShape());
return Status::OK();
}
// Find our value of the rank.
int64_t rank;
if (c->RankKnown(input)) {
rank = c->Rank(input);
} else if (c->ValueKnown(perm_elems)) {
rank = c->Value(perm_elems);
} else {
rank = perm->NumElements();
}
if (!c->RankKnown(input) && rank < 2) {
// A permutation array containing a single element is ambiguous. It could
// indicate either a scalar or a 1-dimensional array, both of which the
// transpose op returns unchanged.
c->set_output(0, input);
return Status::OK();
}
std::vector<DimensionHandle> dims;
dims.resize(rank);
TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input));
// Ensure that perm is a vector and has rank elements.
TF_RETURN_IF_ERROR(c->WithRank(perm_shape, 1, &perm_shape));
TF_RETURN_IF_ERROR(c->WithValue(perm_elems, rank, &perm_elems));
// If we know the rank of the input and the value of perm, we can return
// all shape information, otherwise we can only return rank information,
// but no information for the dimensions.
if (perm != nullptr) {
std::vector<int64_t> data;
if (perm->dtype() == DT_INT32) {
data = AsInt64<int32>(perm, rank);
} else {
data = AsInt64<int64_t>(perm, rank);
}
for (int32_t i = 0; i < rank; ++i) {
int64_t in_idx = data[i];
if (in_idx >= rank) {
return errors::InvalidArgument("perm dim ", in_idx,
" is out of range of input rank ", rank);
}
dims[i] = c->Dim(input, in_idx);
}
} else {
for (int i = 0; i < rank; ++i) {
dims[i] = c->UnknownDim();
}
}
c->set_output(0, c->MakeShape(dims));
return Status::OK();
}
| null | null | 197,748
|
34055993943311259251029225987542874775
| 65
|
Make Transpose's shape inference function validate that negative `perm` values are within the tensor's rank.
PiperOrigin-RevId: 403252853
Change-Id: Ia6b31b45b237312668bb31c2c3b3c7bbce2d2610
|
other
|
tensorflow
|
bb6a0383ed553c286f87ca88c207f6774d5c4a8f
| 1
|
TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
switch (params->type) {
case kTfLiteFloat32:
return GatherNd<float, IndicesT>(params, indices, output);
case kTfLiteUInt8:
return GatherNd<uint8_t, IndicesT>(params, indices, output);
case kTfLiteInt8:
return GatherNd<int8_t, IndicesT>(params, indices, output);
case kTfLiteInt16:
return GatherNd<int16_t, IndicesT>(params, indices, output);
case kTfLiteInt32:
return GatherNd<int32_t, IndicesT>(params, indices, output);
case kTfLiteInt64:
return GatherNd<int64_t, IndicesT>(params, indices, output);
case kTfLiteString:
return GatherNdString<IndicesT>(params, indices, output);
default:
context->ReportError(context,
"Params type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
}
| null | null | 197,760
|
79501722422646953902317860019376579160
| 24
|
Prevent heap OOB read in TFLite's `gather_nd.cc`.
Passing negative indices is illegal but there was a missing check so that resulted in OOB accesses.
PiperOrigin-RevId: 387208551
Change-Id: I6b7a8a62d3e7c13a16d81619e5bc23ae2cdbc7fd
|
other
|
curl
|
8dfc93e573ca740544a2d79ebb0ed786592c65c3
| 1
|
Curl_cookie_add(struct Curl_easy *data,
/*
* The 'data' pointer here may be NULL at times, and thus
* must only be used very carefully for things that can deal
* with data being NULL. Such as infof() and similar
*/
struct CookieInfo *c,
bool httpheader, /* TRUE if HTTP header-style line */
bool noexpire, /* if TRUE, skip remove_expired() */
char *lineptr, /* first character of the line */
const char *domain, /* default domain */
const char *path, /* full path used when this cookie is set,
used to get default path for the cookie
unless set */
bool secure) /* TRUE if connection is over secure origin */
{
struct Cookie *clist;
struct Cookie *co;
struct Cookie *lastc = NULL;
struct Cookie *replace_co = NULL;
struct Cookie *replace_clist = NULL;
time_t now = time(NULL);
bool replace_old = FALSE;
bool badcookie = FALSE; /* cookies are good by default. mmmmm yummy */
size_t myhash;
#ifdef CURL_DISABLE_VERBOSE_STRINGS
(void)data;
#endif
DEBUGASSERT(MAX_SET_COOKIE_AMOUNT <= 255); /* counter is an unsigned char */
if(data->req.setcookies >= MAX_SET_COOKIE_AMOUNT)
return NULL;
/* First, alloc and init a new struct for it */
co = calloc(1, sizeof(struct Cookie));
if(!co)
return NULL; /* bail out if we're this low on memory */
if(httpheader) {
/* This line was read off a HTTP-header */
char name[MAX_NAME];
char what[MAX_NAME];
const char *ptr;
const char *semiptr;
size_t linelength = strlen(lineptr);
if(linelength > MAX_COOKIE_LINE) {
/* discard overly long lines at once */
free(co);
return NULL;
}
semiptr = strchr(lineptr, ';'); /* first, find a semicolon */
while(*lineptr && ISBLANK(*lineptr))
lineptr++;
ptr = lineptr;
do {
/* we have a <what>=<this> pair or a stand-alone word here */
name[0] = what[0] = 0; /* init the buffers */
if(1 <= sscanf(ptr, "%" MAX_NAME_TXT "[^;\r\n=] =%"
MAX_NAME_TXT "[^;\r\n]",
name, what)) {
/*
* Use strstore() below to properly deal with received cookie
* headers that have the same string property set more than once,
* and then we use the last one.
*/
const char *whatptr;
bool done = FALSE;
bool sep;
size_t len = strlen(what);
size_t nlen = strlen(name);
const char *endofn = &ptr[ nlen ];
/*
* Check for too long individual name or contents, or too long
* combination of name + contents. Chrome and Firefox support 4095 or
* 4096 bytes combo
*/
if(nlen >= (MAX_NAME-1) || len >= (MAX_NAME-1) ||
((nlen + len) > MAX_NAME)) {
freecookie(co);
infof(data, "oversized cookie dropped, name/val %zu + %zu bytes",
nlen, len);
return NULL;
}
/* name ends with a '=' ? */
sep = (*endofn == '=')?TRUE:FALSE;
if(nlen) {
endofn--; /* move to the last character */
if(ISBLANK(*endofn)) {
/* skip trailing spaces in name */
while(*endofn && ISBLANK(*endofn) && nlen) {
endofn--;
nlen--;
}
name[nlen] = 0; /* new end of name */
}
}
/* Strip off trailing whitespace from the 'what' */
while(len && ISBLANK(what[len-1])) {
what[len-1] = 0;
len--;
}
/* Skip leading whitespace from the 'what' */
whatptr = what;
while(*whatptr && ISBLANK(*whatptr))
whatptr++;
/*
* Check if we have a reserved prefix set before anything else, as we
* otherwise have to test for the prefix in both the cookie name and
* "the rest". Prefixes must start with '__' and end with a '-', so
* only test for names where that can possibly be true.
*/
if(nlen > 3 && name[0] == '_' && name[1] == '_') {
if(!strncmp("__Secure-", name, 9))
co->prefix |= COOKIE_PREFIX__SECURE;
else if(!strncmp("__Host-", name, 7))
co->prefix |= COOKIE_PREFIX__HOST;
}
if(!co->name) {
/* The very first name/value pair is the actual cookie name */
if(!sep) {
/* Bad name/value pair. */
badcookie = TRUE;
break;
}
co->name = strdup(name);
co->value = strdup(whatptr);
done = TRUE;
if(!co->name || !co->value) {
badcookie = TRUE;
break;
}
}
else if(!len) {
/*
* this was a "<name>=" with no content, and we must allow
* 'secure' and 'httponly' specified this weirdly
*/
done = TRUE;
/*
* secure cookies are only allowed to be set when the connection is
* using a secure protocol, or when the cookie is being set by
* reading from file
*/
if(strcasecompare("secure", name)) {
if(secure || !c->running) {
co->secure = TRUE;
}
else {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("httponly", name))
co->httponly = TRUE;
else if(sep)
/* there was a '=' so we're not done parsing this field */
done = FALSE;
}
if(done)
;
else if(strcasecompare("path", name)) {
strstore(&co->path, whatptr);
if(!co->path) {
badcookie = TRUE; /* out of memory bad */
break;
}
free(co->spath); /* if this is set again */
co->spath = sanitize_cookie_path(co->path);
if(!co->spath) {
badcookie = TRUE; /* out of memory bad */
break;
}
}
else if(strcasecompare("domain", name) && whatptr[0]) {
bool is_ip;
/*
* Now, we make sure that our host is within the given domain, or
* the given domain is not valid and thus cannot be set.
*/
if('.' == whatptr[0])
whatptr++; /* ignore preceding dot */
#ifndef USE_LIBPSL
/*
* Without PSL we don't know when the incoming cookie is set on a
* TLD or otherwise "protected" suffix. To reduce risk, we require a
* dot OR the exact host name being "localhost".
*/
if(bad_domain(whatptr))
domain = ":";
#endif
is_ip = Curl_host_is_ipnum(domain ? domain : whatptr);
if(!domain
|| (is_ip && !strcmp(whatptr, domain))
|| (!is_ip && tailmatch(whatptr, domain))) {
strstore(&co->domain, whatptr);
if(!co->domain) {
badcookie = TRUE;
break;
}
if(!is_ip)
co->tailmatch = TRUE; /* we always do that if the domain name was
given */
}
else {
/*
* We did not get a tailmatch and then the attempted set domain is
* not a domain to which the current host belongs. Mark as bad.
*/
badcookie = TRUE;
infof(data, "skipped cookie with bad tailmatch domain: %s",
whatptr);
}
}
else if(strcasecompare("version", name)) {
strstore(&co->version, whatptr);
if(!co->version) {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("max-age", name)) {
/*
* Defined in RFC2109:
*
* Optional. The Max-Age attribute defines the lifetime of the
* cookie, in seconds. The delta-seconds value is a decimal non-
* negative integer. After delta-seconds seconds elapse, the
* client should discard the cookie. A value of zero means the
* cookie should be discarded immediately.
*/
strstore(&co->maxage, whatptr);
if(!co->maxage) {
badcookie = TRUE;
break;
}
}
else if(strcasecompare("expires", name)) {
strstore(&co->expirestr, whatptr);
if(!co->expirestr) {
badcookie = TRUE;
break;
}
}
/*
* Else, this is the second (or more) name we don't know about!
*/
}
else {
/* this is an "illegal" <what>=<this> pair */
}
if(!semiptr || !*semiptr) {
/* we already know there are no more cookies */
semiptr = NULL;
continue;
}
ptr = semiptr + 1;
while(*ptr && ISBLANK(*ptr))
ptr++;
semiptr = strchr(ptr, ';'); /* now, find the next semicolon */
if(!semiptr && *ptr)
/*
* There are no more semicolons, but there's a final name=value pair
* coming up
*/
semiptr = strchr(ptr, '\0');
} while(semiptr);
if(co->maxage) {
CURLofft offt;
offt = curlx_strtoofft((*co->maxage == '\"')?
&co->maxage[1]:&co->maxage[0], NULL, 10,
&co->expires);
if(offt == CURL_OFFT_FLOW)
/* overflow, used max value */
co->expires = CURL_OFF_T_MAX;
else if(!offt) {
if(!co->expires)
/* already expired */
co->expires = 1;
else if(CURL_OFF_T_MAX - now < co->expires)
/* would overflow */
co->expires = CURL_OFF_T_MAX;
else
co->expires += now;
}
}
else if(co->expirestr) {
/*
* Note that if the date couldn't get parsed for whatever reason, the
* cookie will be treated as a session cookie
*/
co->expires = Curl_getdate_capped(co->expirestr);
/*
* Session cookies have expires set to 0 so if we get that back from the
* date parser let's add a second to make it a non-session cookie
*/
if(co->expires == 0)
co->expires = 1;
else if(co->expires < 0)
co->expires = 0;
}
if(!badcookie && !co->domain) {
if(domain) {
/* no domain was given in the header line, set the default */
co->domain = strdup(domain);
if(!co->domain)
badcookie = TRUE;
}
}
if(!badcookie && !co->path && path) {
/*
* No path was given in the header line, set the default. Note that the
* passed-in path to this function MAY have a '?' and following part that
* MUST NOT be stored as part of the path.
*/
char *queryp = strchr(path, '?');
/*
* queryp is where the interesting part of the path ends, so now we
* want to the find the last
*/
char *endslash;
if(!queryp)
endslash = strrchr(path, '/');
else
endslash = memrchr(path, '/', (queryp - path));
if(endslash) {
size_t pathlen = (endslash-path + 1); /* include end slash */
co->path = malloc(pathlen + 1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
co->path[pathlen] = 0; /* null-terminate */
co->spath = sanitize_cookie_path(co->path);
if(!co->spath)
badcookie = TRUE; /* out of memory bad */
}
else
badcookie = TRUE;
}
}
/*
* If we didn't get a cookie name, or a bad one, the this is an illegal
* line so bail out.
*/
if(badcookie || !co->name) {
freecookie(co);
return NULL;
}
data->req.setcookies++;
}
else {
/*
* This line is NOT a HTTP header style line, we do offer support for
* reading the odd netscape cookies-file format here
*/
char *ptr;
char *firstptr;
char *tok_buf = NULL;
int fields;
/*
* IE introduced HTTP-only cookies to prevent XSS attacks. Cookies marked
* with httpOnly after the domain name are not accessible from javascripts,
* but since curl does not operate at javascript level, we include them
* anyway. In Firefox's cookie files, these lines are preceded with
* #HttpOnly_ and then everything is as usual, so we skip 10 characters of
* the line..
*/
if(strncmp(lineptr, "#HttpOnly_", 10) == 0) {
lineptr += 10;
co->httponly = TRUE;
}
if(lineptr[0]=='#') {
/* don't even try the comments */
free(co);
return NULL;
}
/* strip off the possible end-of-line characters */
ptr = strchr(lineptr, '\r');
if(ptr)
*ptr = 0; /* clear it */
ptr = strchr(lineptr, '\n');
if(ptr)
*ptr = 0; /* clear it */
firstptr = strtok_r(lineptr, "\t", &tok_buf); /* tokenize it on the TAB */
/*
* Now loop through the fields and init the struct we already have
* allocated
*/
for(ptr = firstptr, fields = 0; ptr && !badcookie;
ptr = strtok_r(NULL, "\t", &tok_buf), fields++) {
switch(fields) {
case 0:
if(ptr[0]=='.') /* skip preceding dots */
ptr++;
co->domain = strdup(ptr);
if(!co->domain)
badcookie = TRUE;
break;
case 1:
/*
* flag: A TRUE/FALSE value indicating if all machines within a given
* domain can access the variable. Set TRUE when the cookie says
* .domain.com and to false when the domain is complete www.domain.com
*/
co->tailmatch = strcasecompare(ptr, "TRUE")?TRUE:FALSE;
break;
case 2:
/* The file format allows the path field to remain not filled in */
if(strcmp("TRUE", ptr) && strcmp("FALSE", ptr)) {
/* only if the path doesn't look like a boolean option! */
co->path = strdup(ptr);
if(!co->path)
badcookie = TRUE;
else {
co->spath = sanitize_cookie_path(co->path);
if(!co->spath) {
badcookie = TRUE; /* out of memory bad */
}
}
break;
}
/* this doesn't look like a path, make one up! */
co->path = strdup("/");
if(!co->path)
badcookie = TRUE;
co->spath = strdup("/");
if(!co->spath)
badcookie = TRUE;
fields++; /* add a field and fall down to secure */
/* FALLTHROUGH */
case 3:
co->secure = FALSE;
if(strcasecompare(ptr, "TRUE")) {
if(secure || c->running)
co->secure = TRUE;
else
badcookie = TRUE;
}
break;
case 4:
if(curlx_strtoofft(ptr, NULL, 10, &co->expires))
badcookie = TRUE;
break;
case 5:
co->name = strdup(ptr);
if(!co->name)
badcookie = TRUE;
else {
/* For Netscape file format cookies we check prefix on the name */
if(strncasecompare("__Secure-", co->name, 9))
co->prefix |= COOKIE_PREFIX__SECURE;
else if(strncasecompare("__Host-", co->name, 7))
co->prefix |= COOKIE_PREFIX__HOST;
}
break;
case 6:
co->value = strdup(ptr);
if(!co->value)
badcookie = TRUE;
break;
}
}
if(6 == fields) {
/* we got a cookie with blank contents, fix it */
co->value = strdup("");
if(!co->value)
badcookie = TRUE;
else
fields++;
}
if(!badcookie && (7 != fields))
/* we did not find the sufficient number of fields */
badcookie = TRUE;
if(badcookie) {
freecookie(co);
return NULL;
}
}
if(co->prefix & COOKIE_PREFIX__SECURE) {
/* The __Secure- prefix only requires that the cookie be set secure */
if(!co->secure) {
freecookie(co);
return NULL;
}
}
if(co->prefix & COOKIE_PREFIX__HOST) {
/*
* The __Host- prefix requires the cookie to be secure, have a "/" path
* and not have a domain set.
*/
if(co->secure && co->path && strcmp(co->path, "/") == 0 && !co->tailmatch)
;
else {
freecookie(co);
return NULL;
}
}
if(!c->running && /* read from a file */
c->newsession && /* clean session cookies */
!co->expires) { /* this is a session cookie since it doesn't expire! */
freecookie(co);
return NULL;
}
co->livecookie = c->running;
co->creationtime = ++c->lastct;
/*
* Now we have parsed the incoming line, we must now check if this supersedes
* an already existing cookie, which it may if the previous have the same
* domain and path as this.
*/
/* at first, remove expired cookies */
if(!noexpire)
remove_expired(c);
#ifdef USE_LIBPSL
/*
* Check if the domain is a Public Suffix and if yes, ignore the cookie. We
* must also check that the data handle isn't NULL since the psl code will
* dereference it.
*/
if(data && (domain && co->domain && !Curl_host_is_ipnum(co->domain))) {
const psl_ctx_t *psl = Curl_psl_use(data);
int acceptable;
if(psl) {
acceptable = psl_is_cookie_domain_acceptable(psl, domain, co->domain);
Curl_psl_release(data);
}
else
acceptable = !bad_domain(domain);
if(!acceptable) {
infof(data, "cookie '%s' dropped, domain '%s' must not "
"set cookies for '%s'", co->name, domain, co->domain);
freecookie(co);
return NULL;
}
}
#endif
/* A non-secure cookie may not overlay an existing secure cookie. */
myhash = cookiehash(co->domain);
clist = c->cookies[myhash];
while(clist) {
if(strcasecompare(clist->name, co->name)) {
/* the names are identical */
bool matching_domains = FALSE;
if(clist->domain && co->domain) {
if(strcasecompare(clist->domain, co->domain))
/* The domains are identical */
matching_domains = TRUE;
}
else if(!clist->domain && !co->domain)
matching_domains = TRUE;
if(matching_domains && /* the domains were identical */
clist->spath && co->spath && /* both have paths */
clist->secure && !co->secure && !secure) {
size_t cllen;
const char *sep;
/*
* A non-secure cookie may not overlay an existing secure cookie.
* For an existing cookie "a" with path "/login", refuse a new
* cookie "a" with for example path "/login/en", while the path
* "/loginhelper" is ok.
*/
sep = strchr(clist->spath + 1, '/');
if(sep)
cllen = sep - clist->spath;
else
cllen = strlen(clist->spath);
if(strncasecompare(clist->spath, co->spath, cllen)) {
infof(data, "cookie '%s' for domain '%s' dropped, would "
"overlay an existing cookie", co->name, co->domain);
freecookie(co);
return NULL;
}
}
}
if(!replace_co && strcasecompare(clist->name, co->name)) {
/* the names are identical */
if(clist->domain && co->domain) {
if(strcasecompare(clist->domain, co->domain) &&
(clist->tailmatch == co->tailmatch))
/* The domains are identical */
replace_old = TRUE;
}
else if(!clist->domain && !co->domain)
replace_old = TRUE;
if(replace_old) {
/* the domains were identical */
if(clist->spath && co->spath) {
if(strcasecompare(clist->spath, co->spath))
replace_old = TRUE;
else
replace_old = FALSE;
}
else if(!clist->spath && !co->spath)
replace_old = TRUE;
else
replace_old = FALSE;
}
if(replace_old && !co->livecookie && clist->livecookie) {
/*
* Both cookies matched fine, except that the already present cookie is
* "live", which means it was set from a header, while the new one was
* read from a file and thus isn't "live". "live" cookies are preferred
* so the new cookie is freed.
*/
freecookie(co);
return NULL;
}
if(replace_old) {
replace_co = co;
replace_clist = clist;
}
}
lastc = clist;
clist = clist->next;
}
if(replace_co) {
co = replace_co;
clist = replace_clist;
co->next = clist->next; /* get the next-pointer first */
/* when replacing, creationtime is kept from old */
co->creationtime = clist->creationtime;
/* then free all the old pointers */
free(clist->name);
free(clist->value);
free(clist->domain);
free(clist->path);
free(clist->spath);
free(clist->expirestr);
free(clist->version);
free(clist->maxage);
*clist = *co; /* then store all the new data */
free(co); /* free the newly allocated memory */
co = clist;
}
if(c->running)
/* Only show this when NOT reading the cookies from a file */
infof(data, "%s cookie %s=\"%s\" for domain %s, path %s, "
"expire %" CURL_FORMAT_CURL_OFF_T,
replace_old?"Replaced":"Added", co->name, co->value,
co->domain, co->path, co->expires);
if(!replace_old) {
/* then make the last item point on this new one */
if(lastc)
lastc->next = co;
else
c->cookies[myhash] = co;
c->numcookies++; /* one more cookie in the jar */
}
/*
* Now that we've added a new cookie to the jar, update the expiration
* tracker in case it is the next one to expire.
*/
if(co->expires && (co->expires < c->next_expiration))
c->next_expiration = co->expires;
return co;
}
| null | null | 197,805
|
340011739869148140831706810976304098697
| 717
|
cookie: reject cookies with "control bytes"
Rejects 0x01 - 0x1f (except 0x09) plus 0x7f
Reported-by: Axel Chong
Bug: https://curl.se/docs/CVE-2022-35252.html
CVE-2022-35252
Closes #9381
|
other
|
jerryscript
|
f3a420b672927037beb4508d7bdd68fb25d2caf6
| 1
|
lexer_expect_object_literal_id (parser_context_t *context_p, /**< context */
uint32_t ident_opts) /**< lexer_obj_ident_opts_t option bits */
{
lexer_skip_spaces (context_p);
if (context_p->source_p >= context_p->source_end_p)
{
parser_raise_error (context_p, PARSER_ERR_PROPERTY_IDENTIFIER_EXPECTED);
}
context_p->token.keyword_type = LEXER_EOS;
context_p->token.line = context_p->line;
context_p->token.column = context_p->column;
bool create_literal_object = false;
JERRY_ASSERT ((ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER) || !(ident_opts & LEXER_OBJ_IDENT_CLASS_NO_STATIC));
#if JERRY_FUNCTION_TO_STRING
if (ident_opts & LEXER_OBJ_IDENT_SET_FUNCTION_START)
{
context_p->function_start_p = context_p->source_p;
}
#endif /* JERRY_FUNCTION_TO_STRING */
if (lexer_parse_identifier (context_p, LEXER_PARSE_NO_OPTS))
{
if (!(ident_opts & (LEXER_OBJ_IDENT_ONLY_IDENTIFIERS | LEXER_OBJ_IDENT_OBJECT_PATTERN)))
{
lexer_skip_spaces (context_p);
context_p->token.flags = (uint8_t) (context_p->token.flags | LEXER_NO_SKIP_SPACES);
if (context_p->source_p < context_p->source_end_p
#if JERRY_ESNEXT
&& context_p->source_p[0] != LIT_CHAR_COMMA && context_p->source_p[0] != LIT_CHAR_RIGHT_BRACE
&& context_p->source_p[0] != LIT_CHAR_LEFT_PAREN && context_p->source_p[0] != LIT_CHAR_SEMICOLON
&& context_p->source_p[0] != LIT_CHAR_EQUALS
#endif /* JERRY_ESNEXT */
&& context_p->source_p[0] != LIT_CHAR_COLON)
{
if (lexer_compare_literal_to_string (context_p, "get", 3))
{
context_p->token.type = LEXER_PROPERTY_GETTER;
return;
}
if (lexer_compare_literal_to_string (context_p, "set", 3))
{
context_p->token.type = LEXER_PROPERTY_SETTER;
return;
}
#if JERRY_ESNEXT
if (lexer_compare_literal_to_string (context_p, "async", 5))
{
context_p->token.type = LEXER_KEYW_ASYNC;
return;
}
if (ident_opts & LEXER_OBJ_IDENT_CLASS_NO_STATIC)
{
if (lexer_compare_literal_to_string (context_p, "static", 6))
{
context_p->token.type = LEXER_KEYW_STATIC;
}
return;
}
#endif /* JERRY_ESNEXT */
}
}
create_literal_object = true;
}
#if JERRY_ESNEXT
else if (ident_opts & LEXER_OBJ_IDENT_CLASS_PRIVATE)
{
parser_raise_error (context_p, PARSER_ERR_INVALID_CHARACTER);
}
#endif /* JERRY_ESNEXT */
else
{
switch (context_p->source_p[0])
{
case LIT_CHAR_DOUBLE_QUOTE:
case LIT_CHAR_SINGLE_QUOTE:
{
lexer_parse_string (context_p, LEXER_STRING_NO_OPTS);
create_literal_object = true;
break;
}
#if JERRY_ESNEXT
case LIT_CHAR_LEFT_SQUARE:
{
#if JERRY_FUNCTION_TO_STRING
const uint8_t *function_start_p = context_p->function_start_p;
#endif /* JERRY_FUNCTION_TO_STRING */
lexer_consume_next_character (context_p);
lexer_next_token (context_p);
parser_parse_expression (context_p, PARSE_EXPR_NO_COMMA);
if (context_p->token.type != LEXER_RIGHT_SQUARE)
{
parser_raise_error (context_p, PARSER_ERR_RIGHT_SQUARE_EXPECTED);
}
#if JERRY_FUNCTION_TO_STRING
context_p->function_start_p = function_start_p;
#endif /* JERRY_FUNCTION_TO_STRING */
return;
}
case LIT_CHAR_ASTERISK:
{
if (ident_opts & (LEXER_OBJ_IDENT_ONLY_IDENTIFIERS | LEXER_OBJ_IDENT_OBJECT_PATTERN))
{
break;
}
context_p->token.type = LEXER_MULTIPLY;
lexer_consume_next_character (context_p);
return;
}
case LIT_CHAR_HASHMARK:
{
if (ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER)
{
context_p->token.type = LEXER_HASHMARK;
return;
}
break;
}
#endif /* JERRY_ESNEXT */
case LIT_CHAR_LEFT_BRACE:
{
if (ident_opts & (LEXER_OBJ_IDENT_CLASS_NO_STATIC | LEXER_OBJ_IDENT_CLASS_PRIVATE))
{
break;
}
context_p->token.type = LEXER_LEFT_BRACE;
lexer_consume_next_character (context_p);
return;
}
case LIT_CHAR_RIGHT_BRACE:
{
if (ident_opts & LEXER_OBJ_IDENT_ONLY_IDENTIFIERS)
{
break;
}
context_p->token.type = LEXER_RIGHT_BRACE;
lexer_consume_next_character (context_p);
return;
}
#if JERRY_ESNEXT
case LIT_CHAR_DOT:
{
if (!(context_p->source_p + 1 >= context_p->source_end_p || lit_char_is_decimal_digit (context_p->source_p[1])))
{
if ((ident_opts & ((uint32_t) ~(LEXER_OBJ_IDENT_OBJECT_PATTERN | LEXER_OBJ_IDENT_SET_FUNCTION_START)))
|| context_p->source_p + 2 >= context_p->source_end_p || context_p->source_p[1] != LIT_CHAR_DOT
|| context_p->source_p[2] != LIT_CHAR_DOT)
{
break;
}
context_p->token.type = LEXER_THREE_DOTS;
context_p->token.flags &= (uint8_t) ~LEXER_NO_SKIP_SPACES;
PARSER_PLUS_EQUAL_LC (context_p->column, 3);
context_p->source_p += 3;
return;
}
/* FALLTHRU */
}
#endif /* JERRY_ESNEXT */
default:
{
const uint8_t *char_p = context_p->source_p;
if (char_p[0] == LIT_CHAR_DOT)
{
char_p++;
}
if (char_p < context_p->source_end_p && char_p[0] >= LIT_CHAR_0 && char_p[0] <= LIT_CHAR_9)
{
lexer_parse_number (context_p);
if (!(ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER))
{
lexer_construct_number_object (context_p, false, false);
}
return;
}
break;
}
}
}
if (create_literal_object)
{
#if JERRY_ESNEXT
if (ident_opts & LEXER_OBJ_IDENT_CLASS_IDENTIFIER)
{
return;
}
if (ident_opts & LEXER_OBJ_IDENT_CLASS_PRIVATE)
{
parser_resolve_private_identifier (context_p);
return;
}
#endif /* JERRY_ESNEXT */
lexer_construct_literal_object (context_p, &context_p->token.lit_location, LEXER_STRING_LITERAL);
return;
}
parser_raise_error (context_p, PARSER_ERR_PROPERTY_IDENTIFIER_EXPECTED);
} /* lexer_expect_object_literal_id */
| null | null | 197,825
|
18090551596235490242321914030640150439
| 221
|
Fix class static block opening brace parsing (#4942)
The next character should not be consumed after finding the static block opening brace.
This patch fixes #4916.
JerryScript-DCO-1.0-Signed-off-by: Martin Negyokru [email protected]
|
other
|
tensorflow
|
7731e8dfbe4a56773be5dc94d631611211156659
| 1
|
bool IsConstantFoldable(
const Node* n,
const std::unordered_map<string, std::vector<PartialTensorShape>>*
shape_map,
const std::function<bool(const Node*)>& consider,
int64_t max_constant_size_in_bytes,
std::unordered_map<const Node*, std::vector<Tensor>>*
shape_replacement_map) {
if (n->IsConstant()) {
return true;
}
if (MaybeReplaceShapeOp(n, shape_map, shape_replacement_map)) {
return true;
}
if (n->op_def().is_stateful()) {
return false;
}
if (consider && !consider(n)) {
return false;
}
if (shape_map != nullptr) {
// We can skip the node if an output is known to be oversized.
auto shape_it = shape_map->find(n->name());
if (shape_it != shape_map->end()) {
for (int64_t i = 0; i < shape_it->second.size(); ++i) {
const auto& out_shape = shape_it->second[i];
if (out_shape.IsFullyDefined() &&
out_shape.num_elements() * DataTypeSize(n->output_type(i)) >
max_constant_size_in_bytes) {
return false;
}
}
}
}
if (n->IsControlFlow() || n->IsSend() || n->IsRecv()) {
return false;
}
// TODO(yuanbyu): For now disable these session handle operations.
if (n->IsGetSessionHandle() || n->IsGetSessionTensor() ||
n->IsDeleteSessionTensor()) {
return false;
}
if (n->IsSource()) {
return false;
}
if (n->IsSink()) {
return false;
}
if (n->IsFakeParam()) {
return false;
}
// Since constant-folding runs on the CPU, do not attempt to constant-fold
// operators that have no CPU kernel. Also implies that we will not
// constant-fold functions.
// TODO(phawkins): allow constant-folding for functions; functions may
// be arbitrarily expensive to execute.
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), n->def())) {
return false;
}
// Do not constant fold nodes which will be allocated by ScopedAllocator.
// This is because the constant-folding graph will not contain the
// `_ScopedAllocator` node, and that is necessary to be able to run a node
// that will use this allocator.
if (n->attrs().Find(kScopedAllocatorAttrName) != nullptr) {
VLOG(2) << "Skip node [" << n->DebugString()
<< "] for constant folding due to scoped allocator";
return false;
}
return true;
}
| null | null | 197,826
|
172373573712419656450812250984956481158
| 70
|
Don't constant-fold DT_RESOURCE constants.
PiperOrigin-RevId: 391803952
Change-Id: I0ea3ec31d3e7dfda0f03b4027a237f08d00a3091
|
other
|
samba
|
d92dfb0dabf9cfccb86f2b1146d6c353af2e1435
| 1
|
static NTSTATUS ldapsrv_SearchRequest(struct ldapsrv_call *call)
{
struct ldap_SearchRequest *req = &call->request->r.SearchRequest;
struct ldap_Result *done;
struct ldapsrv_reply *done_r;
TALLOC_CTX *local_ctx;
struct ldapsrv_context *callback_ctx = NULL;
struct ldb_context *samdb = talloc_get_type(call->conn->ldb, struct ldb_context);
struct ldb_dn *basedn;
struct ldb_request *lreq;
struct ldb_control *search_control;
struct ldb_search_options_control *search_options;
struct ldb_control *extended_dn_control;
struct ldb_extended_dn_control *extended_dn_decoded = NULL;
struct ldb_control *notification_control = NULL;
enum ldb_scope scope = LDB_SCOPE_DEFAULT;
const char **attrs = NULL;
const char *scope_str, *errstr = NULL;
int result = -1;
int ldb_ret = -1;
unsigned int i;
int extended_type = 1;
DEBUG(10, ("SearchRequest"));
DEBUGADD(10, (" basedn: %s", req->basedn));
DEBUGADD(10, (" filter: %s\n", ldb_filter_from_tree(call, req->tree)));
local_ctx = talloc_new(call);
NT_STATUS_HAVE_NO_MEMORY(local_ctx);
basedn = ldb_dn_new(local_ctx, samdb, req->basedn);
NT_STATUS_HAVE_NO_MEMORY(basedn);
DEBUG(10, ("SearchRequest: basedn: [%s]\n", req->basedn));
DEBUG(10, ("SearchRequest: filter: [%s]\n", ldb_filter_from_tree(call, req->tree)));
switch (req->scope) {
case LDAP_SEARCH_SCOPE_BASE:
scope_str = "BASE";
scope = LDB_SCOPE_BASE;
break;
case LDAP_SEARCH_SCOPE_SINGLE:
scope_str = "ONE";
scope = LDB_SCOPE_ONELEVEL;
break;
case LDAP_SEARCH_SCOPE_SUB:
scope_str = "SUB";
scope = LDB_SCOPE_SUBTREE;
break;
default:
result = LDAP_PROTOCOL_ERROR;
map_ldb_error(local_ctx, LDB_ERR_PROTOCOL_ERROR, NULL,
&errstr);
errstr = talloc_asprintf(local_ctx,
"%s. Invalid scope", errstr);
goto reply;
}
DEBUG(10,("SearchRequest: scope: [%s]\n", scope_str));
if (req->num_attributes >= 1) {
attrs = talloc_array(local_ctx, const char *, req->num_attributes+1);
NT_STATUS_HAVE_NO_MEMORY(attrs);
for (i=0; i < req->num_attributes; i++) {
DEBUG(10,("SearchRequest: attrs: [%s]\n",req->attributes[i]));
attrs[i] = req->attributes[i];
}
attrs[i] = NULL;
}
DEBUG(5,("ldb_request %s dn=%s filter=%s\n",
scope_str, req->basedn, ldb_filter_from_tree(call, req->tree)));
callback_ctx = talloc_zero(local_ctx, struct ldapsrv_context);
NT_STATUS_HAVE_NO_MEMORY(callback_ctx);
callback_ctx->call = call;
callback_ctx->extended_type = extended_type;
callback_ctx->attributesonly = req->attributesonly;
ldb_ret = ldb_build_search_req_ex(&lreq, samdb, local_ctx,
basedn, scope,
req->tree, attrs,
call->request->controls,
callback_ctx,
ldap_server_search_callback,
NULL);
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
if (call->conn->global_catalog) {
search_control = ldb_request_get_control(lreq, LDB_CONTROL_SEARCH_OPTIONS_OID);
search_options = NULL;
if (search_control) {
search_options = talloc_get_type(search_control->data, struct ldb_search_options_control);
search_options->search_options |= LDB_SEARCH_OPTION_PHANTOM_ROOT;
} else {
search_options = talloc(lreq, struct ldb_search_options_control);
NT_STATUS_HAVE_NO_MEMORY(search_options);
search_options->search_options = LDB_SEARCH_OPTION_PHANTOM_ROOT;
ldb_request_add_control(lreq, LDB_CONTROL_SEARCH_OPTIONS_OID, false, search_options);
}
} else {
ldb_request_add_control(lreq, DSDB_CONTROL_NO_GLOBAL_CATALOG, false, NULL);
}
extended_dn_control = ldb_request_get_control(lreq, LDB_CONTROL_EXTENDED_DN_OID);
if (extended_dn_control) {
if (extended_dn_control->data) {
extended_dn_decoded = talloc_get_type(extended_dn_control->data, struct ldb_extended_dn_control);
extended_type = extended_dn_decoded->type;
} else {
extended_type = 0;
}
callback_ctx->extended_type = extended_type;
}
notification_control = ldb_request_get_control(lreq, LDB_CONTROL_NOTIFICATION_OID);
if (notification_control != NULL) {
const struct ldapsrv_call *pc = NULL;
size_t count = 0;
for (pc = call->conn->pending_calls; pc != NULL; pc = pc->next) {
count += 1;
}
if (count >= call->conn->limits.max_notifications) {
DEBUG(10,("SearchRequest: error MaxNotificationPerConn\n"));
result = map_ldb_error(local_ctx,
LDB_ERR_ADMIN_LIMIT_EXCEEDED,
"MaxNotificationPerConn reached",
&errstr);
goto reply;
}
/*
* For now we need to do periodic retries on our own.
* As the dsdb_notification module will return after each run.
*/
call->notification.busy = true;
}
{
const char *scheme = NULL;
switch (call->conn->referral_scheme) {
case LDAP_REFERRAL_SCHEME_LDAPS:
scheme = "ldaps";
break;
default:
scheme = "ldap";
}
ldb_ret = ldb_set_opaque(
samdb,
LDAP_REFERRAL_SCHEME_OPAQUE,
discard_const_p(char *, scheme));
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
}
{
time_t timeout = call->conn->limits.search_timeout;
if (timeout == 0
|| (req->timelimit != 0
&& req->timelimit < timeout))
{
timeout = req->timelimit;
}
ldb_set_timeout(samdb, lreq, timeout);
}
if (!call->conn->is_privileged) {
ldb_req_mark_untrusted(lreq);
}
LDB_REQ_SET_LOCATION(lreq);
ldb_ret = ldb_request(samdb, lreq);
if (ldb_ret != LDB_SUCCESS) {
goto reply;
}
ldb_ret = ldb_wait(lreq->handle, LDB_WAIT_ALL);
if (ldb_ret == LDB_SUCCESS) {
if (call->notification.busy) {
/* Move/Add it to the end */
DLIST_DEMOTE(call->conn->pending_calls, call);
call->notification.generation =
call->conn->service->notification.generation;
if (callback_ctx->count != 0) {
call->notification.generation += 1;
ldapsrv_notification_retry_setup(call->conn->service,
true);
}
talloc_free(local_ctx);
return NT_STATUS_OK;
}
}
reply:
DLIST_REMOVE(call->conn->pending_calls, call);
call->notification.busy = false;
done_r = ldapsrv_init_reply(call, LDAP_TAG_SearchResultDone);
NT_STATUS_HAVE_NO_MEMORY(done_r);
done = &done_r->msg->r.SearchResultDone;
done->dn = NULL;
done->referral = NULL;
if (result != -1) {
} else if (ldb_ret == LDB_SUCCESS) {
if (callback_ctx->controls) {
done_r->msg->controls = callback_ctx->controls;
talloc_steal(done_r->msg, callback_ctx->controls);
}
result = LDB_SUCCESS;
} else {
DEBUG(10,("SearchRequest: error\n"));
result = map_ldb_error(local_ctx, ldb_ret, ldb_errstring(samdb),
&errstr);
}
done->resultcode = result;
done->errormessage = (errstr?talloc_strdup(done_r, errstr):NULL);
talloc_free(local_ctx);
return ldapsrv_queue_reply_forced(call, done_r);
}
| null | null | 197,830
|
314918662591421001470003516678988238334
| 238
|
CVE-2021-3670 ldap_server: Remove duplicate print of LDAP search details
BUG: https://bugzilla.samba.org/show_bug.cgi?id=14694
Signed-off-by: Andrew Bartlett <[email protected]>
Reviewed-by: Douglas Bagnall <[email protected]>
(cherry picked from commit 2b3af3b560c9617a233c131376c870fce146c002)
|
other
|
openscad
|
b81369dffc3f385257a9b1f5c271118a88671d6d
| 1
|
static std::string getComment(const std::string &fulltext, int line)
{
if (line < 1) return "";
// Locate line
unsigned int start = 0;
for (; start<fulltext.length() ; ++start) {
if (line <= 1) break;
if (fulltext[start] == '\n') line--;
}
int end = start + 1;
while (fulltext[end] != '\n') end++;
std::string comment = fulltext.substr(start, end - start);
// Locate comment
unsigned int startText = 0;
int noOfSemicolon = 0;
bool inString = false;
for (; startText < comment.length() - 1; ++startText) {
if (inString && comment.compare(startText, 2, "\\\"") == 0) {
startText++;
continue;
}
if (comment[startText] == '"') inString = !inString;
if (!inString) {
if (comment.compare(startText, 2, "//") == 0) break;
if (comment[startText] == ';' && noOfSemicolon > 0) return "";
if (comment[startText] == ';') noOfSemicolon++;
}
}
if (startText + 2 > comment.length()) return "";
std::string result = comment.substr(startText + 2);
return result;
}
| null | null | 197,891
|
313568077470000783177680974177297728787
| 38
|
Add file bounds check to comment parser
|
other
|
crun
|
1aeeed2e4fdeffb4875c0d0b439915894594c8c6
| 1
|
crun_command_exec (struct crun_global_arguments *global_args, int argc, char **argv, libcrun_error_t *err)
{
int first_arg = 0, ret = 0;
libcrun_context_t crun_context = {
0,
};
cleanup_process_schema runtime_spec_schema_config_schema_process *process = NULL;
struct libcrun_container_exec_options_s exec_opts;
memset (&exec_opts, 0, sizeof (exec_opts));
exec_opts.struct_size = sizeof (exec_opts);
crun_context.preserve_fds = 0;
crun_context.listen_fds = 0;
argp_parse (&run_argp, argc, argv, ARGP_IN_ORDER, &first_arg, &exec_options);
crun_assert_n_args (argc - first_arg, exec_options.process ? 1 : 2, -1);
ret = init_libcrun_context (&crun_context, argv[first_arg], global_args, err);
if (UNLIKELY (ret < 0))
return ret;
crun_context.detach = exec_options.detach;
crun_context.console_socket = exec_options.console_socket;
crun_context.pid_file = exec_options.pid_file;
crun_context.preserve_fds = exec_options.preserve_fds;
if (getenv ("LISTEN_FDS"))
{
crun_context.listen_fds = strtoll (getenv ("LISTEN_FDS"), NULL, 10);
crun_context.preserve_fds += crun_context.listen_fds;
}
if (exec_options.process)
exec_opts.path = exec_options.process;
else
{
process = xmalloc0 (sizeof (*process));
int i;
process->args_len = argc;
process->args = xmalloc0 ((argc + 1) * sizeof (*process->args));
for (i = 0; i < argc - first_arg; i++)
process->args[i] = xstrdup (argv[first_arg + i + 1]);
process->args[i] = NULL;
if (exec_options.cwd)
process->cwd = exec_options.cwd;
process->terminal = exec_options.tty;
process->env = exec_options.env;
process->env_len = exec_options.env_size;
process->user = make_oci_process_user (exec_options.user);
if (exec_options.process_label != NULL)
process->selinux_label = exec_options.process_label;
if (exec_options.apparmor != NULL)
process->apparmor_profile = exec_options.apparmor;
if (exec_options.cap_size > 0)
{
runtime_spec_schema_config_schema_process_capabilities *capabilities
= xmalloc (sizeof (runtime_spec_schema_config_schema_process_capabilities));
capabilities->effective = exec_options.cap;
capabilities->effective_len = exec_options.cap_size;
capabilities->inheritable = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->inheritable_len = exec_options.cap_size;
capabilities->bounding = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->bounding_len = exec_options.cap_size;
capabilities->ambient = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->ambient_len = exec_options.cap_size;
capabilities->permitted = dup_array (exec_options.cap, exec_options.cap_size);
capabilities->permitted_len = exec_options.cap_size;
process->capabilities = capabilities;
}
// noNewPriviledges will remain `false` if basespec has `false` unless specified
// Default is always `true` in generated basespec config
if (exec_options.no_new_privs)
process->no_new_privileges = 1;
exec_opts.process = process;
}
exec_opts.cgroup = exec_options.cgroup;
return libcrun_container_exec_with_options (&crun_context, argv[first_arg], &exec_opts, err);
}
| null | null | 197,973
|
67555112620788823184421673030526843273
| 93
|
exec: --cap do not set inheritable capabilities
Closes: CVE-2022-27650
Signed-off-by: Giuseppe Scrivano <[email protected]>
|
other
|
PrimeVul Original Test Dataset (Lite for reproducing Code-TREAT results)
Overview
This dataset contains the original test split from the PrimeVul dataset, provided for reproducibility purposes. The data is sourced from the paper "PrimeVul: Vulnerability Detection with Code Language Models: How Far Are We?" and includes both the default (single functions) and paired (vulnerable/non-vulnerable pairs) configurations.
Citation
If you use this dataset, please cite the original PrimeVul paper:
@article{primevul2024,
title={PrimeVul: Vulnerability Detection with Code Language Models: How Far Are We?},
author={[Authors from the original paper]},
journal={arXiv preprint arXiv:2403.18624},
year={2024},
url={https://arxiv.org/abs/2403.18624}
}
Dataset Configurations
- Description: Single function vulnerability detection dataset
- Size: 25,911 test samples
- Format: Each sample contains a single code function with binary vulnerability label
- Fields:
project: Source project namecommit_id: Git commit hashtarget: Binary label (0=non-vulnerable, 1=vulnerable)func: Source code functioncwe: Common Weakness Enumeration categoriesidx: Unique sample identifierhash: Function hash- Additional metadata fields
Data Source
The original JSONL files are available from the PrimeVul authors at:
- Google Drive: https://drive.google.com/drive/folders/19iLaNDS0z99N8kB_jBRTmDLehwZBolMY
- GitHub Repository: https://github.com/DLVulDet/PrimeVul
Data Format
This dataset provides the test splits in Parquet format for easy loading with HuggingFace datasets. The original data was in JSONL format and has been converted while preserving all original fields and values.
Usage
from datasets import load_dataset
# Load the default configuration (single functions)
dataset_default = load_dataset("Code-TREAT/PrimeVul_original", "default")
# Load the paired configuration
dataset_paired = load_dataset("Code-TREAT/PrimeVul_original", "paired")
# Access test split
test_data_default = dataset_default["test"]
test_data_paired = dataset_paired["test"]
Purpose
This dataset is provided by the Code-TREAT project to ensure reproducibility and consistency in vulnerability detection research. By providing the exact test splits used in evaluations, researchers can:
- Reproduce results from papers using this dataset
- Compare methods fairly using identical test data
- Validate new approaches against established benchmarks
License
Please refer to the original PrimeVul repository for licensing information: https://github.com/DLVulDet/PrimeVul
Acknowledgments
We thank the authors of PrimeVul for making their dataset publicly available and for their contributions to vulnerability detection research.
Contact
For questions about this dataset distribution, please refer to the original PrimeVul repository or the Code-TREAT project.
- Downloads last month
- 19