ambrosehui/codet5p-220m-vulnerability-scanner-it
Text Classification • Updated

Columns: `func_before` (string, length 14–241k) | `vul` (int64, 0 or 1) | `CVE ID` (string, length 13–43, may be null ⌀) | `CWE ID` (string, 91 classes)
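A minimal loading sketch (an assumption, not part of the original card): if this table is published on the Hugging Face Hub under the repository id above with exactly the column names listed, and exposes a `train` split, it can be read with the `datasets` library.

```python
# Minimal sketch, not from the original card. Assumptions: the data is hosted
# on the Hugging Face Hub under the repository id shown above, the columns are
# named exactly "func_before", "vul", "CVE ID" and "CWE ID", and a "train"
# split exists.
from datasets import load_dataset

ds = load_dataset("ambrosehui/codet5p-220m-vulnerability-scanner-it", split="train")

row = ds[0]
print(row["vul"])                 # binary label, 0 or 1
print(row["CVE ID"])              # e.g. "CVE-2016-7912", or None when absent
print(row["CWE ID"])              # e.g. "CWE-416", or None when absent
print(row["func_before"][:200])   # first 200 characters of the function source
```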
| func_before | vul | CVE ID | CWE ID |
|---|---|---|---|
static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
if (ffs->ffs_eventfd)
eventfd_ctx_put(ffs->ffs_eventfd);
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
}
| 0 | CVE-2016-7912 | CWE-416 |
virtual void AddNetworkObserver(const std::string& service_path,
NetworkObserver* observer) {}
| 0 | null | null |
int reds_on_migrate_dst_set_seamless(MainChannelClient *mcc, uint32_t src_version)
{
/* seamless migration is not supported with multiple clients*/
if (reds->allow_multiple_clients || src_version > SPICE_MIGRATION_PROTOCOL_VERSION) {
reds->dst_do_seamless_migrate = FALSE;
} else {
RedChannelClient *rcc = main_channel_client_get_base(mcc);
red_client_set_migration_seamless(rcc->client);
/* linking all the channels that have been connected before migration handshake */
reds->dst_do_seamless_migrate = reds_link_mig_target_channels(rcc->client);
}
return reds->dst_do_seamless_migrate;
}
| 0 | CVE-2013-4282 | CWE-119 |
static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
{
__be32 *p;
p = reserve_space(xdr, 32);
p = xdr_encode_hyper(p, lowner->clientid);
*p++ = cpu_to_be32(20);
p = xdr_encode_opaque_fixed(p, "lock id:", 8);
*p++ = cpu_to_be32(lowner->s_dev);
xdr_encode_hyper(p, lowner->id);
}
| 0 | CVE-2011-4131 | CWE-189 |
void WebLocalFrameImpl::CheckCompleted() {
GetFrame()->GetDocument()->CheckCompleted();
}
| 0 | CVE-2018-17468 | CWE-200 |
std::string ShellContentClient::GetUserAgent(bool* overriding) const {
*overriding = false;
return std::string("Chrome/15.16.17.18");
}
| 0 | CVE-2011-3084 | CWE-264 |
static int do_last(struct nameidata *nd,
struct file *file, const struct open_flags *op,
int *opened)
{
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
bool got_write = false;
int acc_mode = op->acc_mode;
unsigned seq;
struct inode *inode;
struct path save_parent = { .dentry = NULL, .mnt = NULL };
struct path path;
bool retried = false;
int error;
nd->flags &= ~LOOKUP_PARENT;
nd->flags |= op->intent;
if (nd->last_type != LAST_NORM) {
error = handle_dots(nd, nd->last_type);
if (unlikely(error))
return error;
goto finish_open;
}
if (!(open_flag & O_CREAT)) {
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
/* we _can_ be in RCU mode here */
error = lookup_fast(nd, &path, &inode, &seq);
if (likely(!error))
goto finish_lookup;
if (error < 0)
return error;
BUG_ON(nd->inode != dir->d_inode);
} else {
/* create side of things */
/*
* This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
* has been cleared when we got to the last component we are
* about to look up
*/
error = complete_walk(nd);
if (error)
return error;
audit_inode(nd->name, dir, LOOKUP_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
return -EISDIR;
}
retry_lookup:
if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
error = mnt_want_write(nd->path.mnt);
if (!error)
got_write = true;
/*
* do _not_ fail yet - we might not need that or fail with
* a different error; let lookup_open() decide; we'll be
* dropping this one anyway.
*/
}
mutex_lock(&dir->d_inode->i_mutex);
error = lookup_open(nd, &path, file, op, got_write, opened);
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
if (error)
goto out;
if ((*opened & FILE_CREATED) ||
!S_ISREG(file_inode(file)->i_mode))
will_truncate = false;
audit_inode(nd->name, file->f_path.dentry, 0);
goto opened;
}
if (*opened & FILE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
will_truncate = false;
acc_mode = MAY_OPEN;
path_to_nameidata(&path, nd);
goto finish_open_created;
}
/*
* create/update audit record if it already exists.
*/
if (d_is_positive(path.dentry))
audit_inode(nd->name, path.dentry, 0);
/*
* If atomic_open() acquired write access it is dropped now due to
* possible mount and symlink following (this might be optimized away if
* necessary...)
*/
if (got_write) {
mnt_drop_write(nd->path.mnt);
got_write = false;
}
if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
path_to_nameidata(&path, nd);
return -EEXIST;
}
error = follow_managed(&path, nd);
if (unlikely(error < 0))
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
inode = d_backing_inode(path.dentry);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
finish_lookup:
if (nd->depth)
put_link(nd);
error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
inode, seq);
if (unlikely(error))
return error;
if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
path_to_nameidata(&path, nd);
return -ELOOP;
}
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
path_to_nameidata(&path, nd);
} else {
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path.mnt);
nd->path.dentry = path.dentry;
}
nd->inode = inode;
nd->seq = seq;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
error = complete_walk(nd);
if (error) {
path_put(&save_parent);
return error;
}
audit_inode(nd->name, nd->path.dentry, 0);
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
if (!d_is_reg(nd->path.dentry))
will_truncate = false;
if (will_truncate) {
error = mnt_want_write(nd->path.mnt);
if (error)
goto out;
got_write = true;
}
finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
goto out;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
error = vfs_open(&nd->path, file, current_cred());
if (!error) {
*opened |= FILE_OPENED;
} else {
if (error == -EOPENSTALE)
goto stale_open;
goto out;
}
opened:
error = open_check_o_direct(file);
if (error)
goto exit_fput;
error = ima_file_check(file, op->acc_mode, *opened);
if (error)
goto exit_fput;
if (will_truncate) {
error = handle_truncate(file);
if (error)
goto exit_fput;
}
out:
if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
return error;
exit_fput:
fput(file);
goto out;
stale_open:
/* If no saved parent or already retried then can't retry */
if (!save_parent.dentry || retried)
goto out;
BUG_ON(save_parent.dentry != dir);
path_put(&nd->path);
nd->path = save_parent;
nd->inode = dir->d_inode;
save_parent.mnt = NULL;
save_parent.dentry = NULL;
if (got_write) {
mnt_drop_write(nd->path.mnt);
got_write = false;
}
retried = true;
goto retry_lookup;
}
| 0 | CVE-2015-2925 | CWE-254 |
layer_get_tile(int layer, int x, int y)
{
struct map_tile* tile;
int width;
width = s_map->layers[layer].width;
tile = &s_map->layers[layer].tilemap[x + y * width];
return tile->tile_index;
}
| 0 | CVE-2018-1000524 | CWE-190 |
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
int ret = 0;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;
ld = tty_ldisc_ref_wait(tty);
if (ld->ops->poll)
ret = (ld->ops->poll)(tty, filp, wait);
tty_ldisc_deref(ld);
return ret;
}
| 0 | CVE-2011-5321 | null |
gst_asf_demux_check_activate_streams (GstASFDemux * demux, gboolean force)
{
guint i, actual_streams = 0;
if (demux->activated_streams)
return TRUE;
if (G_UNLIKELY (!gst_asf_demux_check_first_ts (demux, force)))
return FALSE;
if (!all_streams_prerolled (demux) && !force) {
GST_DEBUG_OBJECT (demux, "not all streams with data beyond preroll yet");
return FALSE;
}
for (i = 0; i < demux->num_streams; ++i) {
AsfStream *stream = &demux->stream[i];
if (stream->payloads->len > 0) {
if (stream->inspect_payload && /* dvr-ms required payload inspection */
!stream->active && /* do not inspect active streams (caps were already set) */
!gst_asf_demux_update_caps_from_payload (demux, stream) && /* failed to determine caps */
stream->payloads->len < 20) { /* if we couldn't determine the caps from 20 packets then just give up and use whatever was in codecTag */
/* try to gather some more data */
return FALSE;
}
/* we don't check mutual exclusion stuff here; either we have data for
* a stream, then we active it, or we don't, then we'll ignore it */
GST_LOG_OBJECT (stream->pad, "is prerolled - activate!");
gst_asf_demux_activate_stream (demux, stream);
actual_streams += 1;
} else {
GST_LOG_OBJECT (stream->pad, "no data, ignoring stream");
}
}
if (actual_streams == 0) {
/* We don't have any streams activated ! */
GST_ERROR_OBJECT (demux, "No streams activated!");
return FALSE;
}
gst_asf_demux_release_old_pads (demux);
demux->activated_streams = TRUE;
GST_LOG_OBJECT (demux, "signalling no more pads");
gst_element_no_more_pads (GST_ELEMENT (demux));
return TRUE;
}
| 0 | CVE-2017-5847 | CWE-125 |
drive_poll_media_completed_cb (DBusGMethodInvocation *context,
Device *device,
gboolean job_was_cancelled,
int status,
const char *stderr,
const char *stdout,
gpointer user_data)
{
if (WEXITSTATUS (status) == 0 && !job_was_cancelled)
{
device_generate_kernel_change_event (device);
dbus_g_method_return (context);
}
else
{
if (job_was_cancelled)
{
throw_error (context, ERROR_CANCELLED, "Job was cancelled");
}
else
{
throw_error (context,
ERROR_FAILED,
"Error detaching: helper exited with exit code %d: %s",
WEXITSTATUS (status),
stderr);
}
}
}
| 0 | CVE-2010-1149 | CWE-200 |
bool Microtask::performingCheckpoint(v8::Isolate* isolate)
{
return V8PerIsolateData::from(isolate)->performingMicrotaskCheckpoint();
}
| 0 | CVE-2015-1281 | CWE-254 |
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
unsigned long long excess;
struct mem_cgroup_per_zone *mz;
struct mem_cgroup_tree_per_zone *mctz;
int nid = page_to_nid(page);
int zid = page_zonenum(page);
mctz = soft_limit_tree_from_page(page);
/*
* Necessary to update all ancestors when hierarchy is used.
* because their event counter is not touched.
*/
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
mz = mem_cgroup_zoneinfo(memcg, nid, zid);
excess = res_counter_soft_limit_excess(&memcg->res);
/*
* We have to update the tree if mz is on RB-tree or
* mem is over its softlimit.
*/
if (excess || mz->on_tree) {
spin_lock(&mctz->lock);
/* if on-tree, remove it */
if (mz->on_tree)
__mem_cgroup_remove_exceeded(memcg, mz, mctz);
/*
* Insert again. mz->usage_in_excess will be updated.
* If excess is 0, no tree ops.
*/
__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
spin_unlock(&mctz->lock);
}
}
}
| 0 | CVE-2012-1179 | CWE-264 |
prng_uninit (void)
{
free (nonce_data);
nonce_data = NULL;
nonce_md = NULL;
nonce_secret_len = 0;
}
| 0 | CVE-2013-2061 | CWE-200 |
void gd_error(const char *format, ...)
{
va_list args;
va_start(args, format);
_gd_error_ex(GD_WARNING, format, args);
va_end(args);
}
| 0 | CVE-2016-9317 | CWE-20 |
void pdf_set_populating_xref_trailer(fz_context *ctx, pdf_document *doc, pdf_obj *trailer)
{
/* Update the trailer of the xref section being populated */
pdf_xref *xref = &doc->xref_sections[doc->num_xref_sections - 1];
if (xref->trailer)
{
pdf_drop_obj(ctx, xref->pre_repair_trailer);
xref->pre_repair_trailer = xref->trailer;
}
xref->trailer = pdf_keep_obj(ctx, trailer);
}
| 0 | CVE-2017-17858 | CWE-119 |
static int skcipher_null_crypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while (walk.nbytes) {
if (walk.src.virt.addr != walk.dst.virt.addr)
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
| 0 | CVE-2013-7421 | CWE-264 |
static __inline USHORT CalculateIpPseudoHeaderChecksum(IPHeader *pIpHeader,
tTcpIpPacketParsingResult res,
USHORT headerAndPayloadLen)
{
if (res.ipStatus == ppresIPV4)
return CalculateIpv4PseudoHeaderChecksum(&pIpHeader->v4, headerAndPayloadLen);
if (res.ipStatus == ppresIPV6)
return CalculateIpv6PseudoHeaderChecksum(&pIpHeader->v6, headerAndPayloadLen);
return 0;
}
| 0 | CVE-2015-3215 | CWE-20 |
void Initialize(const char* url,
bool expected,
size_t file_size = kFileSize) {
InitializeWithCORS(url, expected, UrlData::CORS_UNSPECIFIED, file_size);
}
| 0 | CVE-2018-18352 | CWE-732 |
xmlXPathNewValueTree(xmlNodePtr val) {
xmlXPathObjectPtr ret;
ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject));
if (ret == NULL) {
xmlXPathErrMemory(NULL, "creating result value tree\n");
return(NULL);
}
memset(ret, 0 , (size_t) sizeof(xmlXPathObject));
ret->type = XPATH_XSLT_TREE;
ret->boolval = 1;
ret->user = (void *) val;
ret->nodesetval = xmlXPathNodeSetCreate(val);
#ifdef XP_DEBUG_OBJ_USAGE
xmlXPathDebugObjUsageRequested(NULL, XPATH_XSLT_TREE);
#endif
return(ret);
}
| 0 | null | null |
ui::TextInputMode RenderWidgetHostViewAura::GetTextInputMode() const {
if (text_input_manager_ && text_input_manager_->GetTextInputState())
return text_input_manager_->GetTextInputState()->mode;
return ui::TEXT_INPUT_MODE_DEFAULT;
}
| 0 | CVE-2016-1615 | CWE-254 |
void GLES2DecoderImpl::DoStencilMaskSeparate(GLenum face, GLuint mask) {
if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
mask_stencil_front_ = mask;
}
if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
mask_stencil_back_ = mask;
}
state_dirty_ = true;
}
| 0 | CVE-2011-2858 | CWE-119 |
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
int error;
spin_lock_irq(&current->sighand->siglock);
if (oldset)
*oldset = current->blocked;
error = 0;
switch (how) {
case SIG_BLOCK:
sigorsets(&current->blocked, &current->blocked, set);
break;
case SIG_UNBLOCK:
signandsets(&current->blocked, &current->blocked, set);
break;
case SIG_SETMASK:
current->blocked = *set;
break;
default:
error = -EINVAL;
}
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return error;
}
| 0 | CVE-2011-1182 | null |
void CleanUp(DownloadId id) {
MockDownloadFile* file = download_file_factory_->GetExistingFile(id);
ASSERT_TRUE(file != NULL);
EXPECT_CALL(*file, Cancel());
download_file_manager_->CancelDownload(id);
EXPECT_TRUE(NULL == download_file_manager_->GetDownloadFile(id));
}
| 1 | CVE-2012-2895 | CWE-119 |
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
int nid)
{
LIST_HEAD(freeable);
long freed;
freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
&freeable, &nr_to_scan);
dispose_list(&freeable);
return freed;
}
| 0 | CVE-2014-4014 | CWE-264 |
void GetInterface(const std::string& interface_name,
mojo::ScopedMessagePipeHandle handle) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_CURRENTLY_ON(BrowserThread::IO);
service_manager::mojom::InterfaceProvider* provider = registry_.get();
base::AutoLock lock(enabled_lock_);
if (enabled_)
provider->GetInterface(interface_name, std::move(handle));
}
| 0 | CVE-2015-1265 | null |
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}
| 0 | CVE-2011-2491 | CWE-399 |
cifs_echo_request(struct work_struct *work)
{
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
/*
* We cannot send an echo until the NEGOTIATE_PROTOCOL request is
* done, which is indicated by maxBuf != 0. Also, no need to ping if
* we got a response recently
*/
if (server->maxBuf == 0 ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
rc = CIFSSMBEcho(server);
if (rc)
cFYI(1, "Unable to send echo request to server: %s",
server->hostname);
requeue_echo:
queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}
| 0 | CVE-2011-3363 | CWE-20 |
SegmentInfo::~SegmentInfo()
{
delete[] m_pMuxingAppAsUTF8;
m_pMuxingAppAsUTF8 = NULL;
delete[] m_pWritingAppAsUTF8;
m_pWritingAppAsUTF8 = NULL;
delete[] m_pTitleAsUTF8;
m_pTitleAsUTF8 = NULL;
}
| 1 | CVE-2016-1621 | CWE-119 |
ext4_ext_binsearch(struct inode *inode,
struct ext4_ext_path *path, ext4_lblk_t block)
{
struct ext4_extent_header *eh = path->p_hdr;
struct ext4_extent *r, *l, *m;
if (eh->eh_entries == 0) {
/*
* this leaf is empty:
* we get such a leaf in split/add case
*/
return;
}
ext_debug("binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
while (l <= r) {
m = l + (r - l) / 2;
if (block < le32_to_cpu(m->ee_block))
r = m - 1;
else
l = m + 1;
ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
m, le32_to_cpu(m->ee_block),
r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
ext_debug(" -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext_pblock(path->p_ext),
ext4_ext_is_uninitialized(path->p_ext),
ext4_ext_get_actual_len(path->p_ext));
#ifdef CHECK_BINSEARCH
{
struct ext4_extent *chex, *ex;
int k;
chex = ex = EXT_FIRST_EXTENT(eh);
for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
BUG_ON(k && le32_to_cpu(ex->ee_block)
<= le32_to_cpu(ex[-1].ee_block));
if (block < le32_to_cpu(ex->ee_block))
break;
chex = ex;
}
BUG_ON(chex != path->p_ext);
}
#endif
}
| 0 | CVE-2015-8324 | null |
void AudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
ASSERT(isAudioThread());
for (unsigned i = 0; i < m_finishedNodes.size(); i++)
derefNode(m_finishedNodes[i]);
m_finishedNodes.clear();
}
| 0 | null | null |
encode_legacy_async_masks(const struct ofputil_async_cfg *ac,
enum ofputil_async_msg_type oam,
enum ofp_version version,
ovs_be32 masks[2])
{
for (int i = 0; i < 2; i++) {
bool master = i == 0;
const struct ofp14_async_prop *ap
= get_ofp14_async_config_prop_by_oam(oam, master);
masks[i] = encode_async_mask(ac, ap, version);
}
}
| 0 | CVE-2018-17204 | CWE-617 |
static int show_pid_smap(struct seq_file *m, void *v)
{
return show_smap(m, v, 1);
}
| 0 | CVE-2016-0823 | CWE-200 |
void TestLoadTimingReused(const LoadTimingInfo& load_timing_info) {
EXPECT_TRUE(load_timing_info.socket_reused);
EXPECT_NE(NetLogSource::kInvalidId, load_timing_info.socket_log_id);
EXPECT_TRUE(load_timing_info.proxy_resolve_start.is_null());
EXPECT_TRUE(load_timing_info.proxy_resolve_end.is_null());
ExpectConnectTimingHasNoTimes(load_timing_info.connect_timing);
EXPECT_FALSE(load_timing_info.send_start.is_null());
EXPECT_LE(load_timing_info.send_start, load_timing_info.send_end);
EXPECT_TRUE(load_timing_info.request_start_time.is_null());
EXPECT_TRUE(load_timing_info.request_start.is_null());
EXPECT_TRUE(load_timing_info.receive_headers_end.is_null());
}
| 0 | CVE-2018-18358 | CWE-20 |
static void nfs4_delegreturn_release(void *calldata)
{
kfree(calldata);
}
| 0 | CVE-2012-2375 | CWE-189 |
struct file *open_exec(const char *name)
{
struct filename *filename = getname_kernel(name);
struct file *f = ERR_CAST(filename);
if (!IS_ERR(filename)) {
f = do_open_execat(AT_FDCWD, filename, 0);
putname(filename);
}
return f;
}
| 0 | CVE-2015-3339 | CWE-362 |
void GpuCommandBufferStub::OnCreateVideoDecoder(
media::VideoCodecProfile profile,
IPC::Message* reply_message) {
int decoder_route_id = channel_->GenerateRouteID();
GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
reply_message, decoder_route_id);
GpuVideoDecodeAccelerator* decoder =
new GpuVideoDecodeAccelerator(this, decoder_route_id, this);
video_decoders_.AddWithID(decoder, decoder_route_id);
channel_->AddRoute(decoder_route_id, decoder);
decoder->Initialize(profile, reply_message,
channel_->renderer_process());
}
| 1 | CVE-2012-2816 | null |
void WebContentsImpl::UpdateOverridingUserAgent() {
NotifyPreferencesChanged();
}
| 0 | CVE-2017-5093 | CWE-20 |
static FloatPoint3D TransformOrigin(const LayoutBox& box) {
const ComputedStyle& style = box.StyleRef();
if (!style.HasTransform())
return FloatPoint3D();
FloatSize border_box_size(box.Size());
return FloatPoint3D(
FloatValueForLength(style.TransformOriginX(), border_box_size.Width()),
FloatValueForLength(style.TransformOriginY(), border_box_size.Height()),
style.TransformOriginZ());
}
| 0 | CVE-2015-6787 | null |
static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0],
bitmask[1] & nfs4_fs_locations_bitmap[1], hdr);
}
| 0 | CVE-2011-4131 | CWE-189 |
bool PaintPropertyTreeBuilder::UpdateFragments() {
bool had_paint_properties = object_.FirstFragment().PaintProperties();
bool needs_paint_properties =
object_.StyleRef().ClipPath() || NeedsPaintOffsetTranslation(object_) ||
NeedsTransform(object_) || NeedsClipPathClip(object_) ||
NeedsEffect(object_) || NeedsTransformForNonRootSVG(object_) ||
NeedsFilter(object_) || NeedsCssClip(object_) ||
NeedsInnerBorderRadiusClip(object_) || NeedsOverflowClip(object_) ||
NeedsPerspective(object_) || NeedsSVGLocalToBorderBoxTransform(object_) ||
NeedsScrollOrScrollTranslation(object_);
if (object_.IsFixedPositionObjectInPagedMedia()) {
context_.is_repeating_fixed_position = true;
CreateFragmentContextsForRepeatingFixedPosition();
} else if (ObjectIsRepeatingTableSectionInPagedMedia()) {
context_.is_repeating_table_section = true;
CreateFragmentContextsForRepeatingTableSectionInPagedMedia();
}
if (IsRepeatingInPagedMedia()) {
CreateFragmentDataForRepeatingInPagedMedia(needs_paint_properties);
} else if (context_.painting_layer->ShouldFragmentCompositedBounds()) {
CreateFragmentContextsInFlowThread(needs_paint_properties);
} else {
InitSingleFragmentFromParent(needs_paint_properties);
UpdateCompositedLayerPaginationOffset();
context_.is_repeating_fixed_position = false;
context_.is_repeating_table_section = false;
}
if (object_.IsSVGHiddenContainer()) {
context_.fragments.clear();
context_.fragments.Grow(1);
context_.has_svg_hidden_container_ancestor = true;
PaintPropertyTreeBuilderFragmentContext& fragment_context =
context_.fragments[0];
fragment_context.current.paint_offset_root =
fragment_context.absolute_position.paint_offset_root =
fragment_context.fixed_position.paint_offset_root = &object_;
object_.GetMutableForPainting().FirstFragment().ClearNextFragment();
}
if (object_.HasLayer()) {
ToLayoutBoxModelObject(object_).Layer()->SetIsUnderSVGHiddenContainer(
context_.has_svg_hidden_container_ancestor);
}
UpdateRepeatingTableSectionPaintOffsetAdjustment();
return needs_paint_properties != had_paint_properties;
}
| 0 | CVE-2015-6787 | null |
buffer_write(struct display *dp, struct buffer *buffer, png_bytep data,
png_size_t size)
/* Generic write function used both from the write callback provided to
* libpng and from the generic read code.
*/
{
/* Write the data into the buffer, adding buffers as required */
struct buffer_list *last = buffer->last;
size_t end_count = buffer->end_count;
while (size > 0)
{
size_t avail;
if (end_count >= sizeof last->buffer)
{
if (last->next == NULL)
{
last = buffer_extend(last);
if (last == NULL)
display_log(dp, APP_ERROR, "out of memory saving file");
}
else
last = last->next;
buffer->last = last; /* avoid the need to rewrite every time */
end_count = 0;
}
avail = (sizeof last->buffer) - end_count;
if (avail > size)
avail = size;
memcpy(last->buffer + end_count, data, avail);
end_count += avail;
size -= avail;
data += avail;
}
buffer->end_count = end_count;
}
| 0 | CVE-2016-3751 | null |
static inline bool isChildHitTestCandidate(RenderBox* box)
{
return box->height() && box->style()->visibility() == VISIBLE && !box->isFloatingOrOutOfFlowPositioned();
}
| 0 | CVE-2013-0904 | CWE-119 |
static void StringFrozenArrayAttributeAttributeGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Local<v8::Object> holder = info.Holder();
TestObject* impl = V8TestObject::ToImpl(holder);
V8SetReturnValue(info, FreezeV8Object(ToV8(impl->stringFrozenArrayAttribute(), info.Holder(), info.GetIsolate()), info.GetIsolate()));
}
| 0 | CVE-2017-5120 | null |
status_t Camera2Client::startRecording() {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock icl(mBinderSerializationLock);
status_t res;
if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
SharedParameters::Lock l(mParameters);
return startRecordingL(l.mParameters, false);
}
| 0 | CVE-2016-0826 | CWE-264 |
tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
int ret;
ret = __tty_ldisc_lock(tty, timeout);
if (!ret)
return -EBUSY;
set_bit(TTY_LDISC_HALTED, &tty->flags);
return 0;
}
| 0 | CVE-2015-8964 | CWE-200 |
char *http_get_path(struct http_txn *txn)
{
char *ptr, *end;
ptr = txn->req.chn->buf->p + txn->req.sl.rq.u;
end = ptr + txn->req.sl.rq.u_l;
if (ptr >= end)
return NULL;
/* RFC7230, par. 2.7 :
* Request-URI = "*" | absuri | abspath | authority
*/
if (*ptr == '*')
return NULL;
if (isalpha((unsigned char)*ptr)) {
/* this is a scheme as described by RFC3986, par. 3.1 */
ptr++;
while (ptr < end &&
(isalnum((unsigned char)*ptr) || *ptr == '+' || *ptr == '-' || *ptr == '.'))
ptr++;
/* skip '://' */
if (ptr == end || *ptr++ != ':')
return NULL;
if (ptr == end || *ptr++ != '/')
return NULL;
if (ptr == end || *ptr++ != '/')
return NULL;
}
/* skip [user[:passwd]@]host[:[port]] */
while (ptr < end && *ptr != '/')
ptr++;
if (ptr == end)
return NULL;
/* OK, we got the '/' ! */
return ptr;
}
| 0 | CVE-2018-11469 | CWE-200 |
int ocfs2_permission(struct inode *inode, int mask)
{
int ret, had_lock;
struct ocfs2_lock_holder oh;
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
if (had_lock < 0) {
ret = had_lock;
goto out;
} else if (had_lock) {
/* See comments in ocfs2_setattr() for details.
* The call chain of this case could be:
* do_sys_open()
* may_open()
* inode_permission()
* ocfs2_permission()
* ocfs2_iop_get_acl()
*/
mlog(ML_ERROR, "Another case of recursive locking:\n");
dump_stack();
}
ret = generic_permission(inode, mask);
ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
return ret;
}
| 0 | CVE-2017-18204 | null |
valgrind_append(char **dst, int valgrind_gdbserver, int valgrind_mode, int valgrind_tool, char *valgrind_path, const char *valgrind_log)
{
int i = 0;
if (valgrind_tool)
{
dst[i++] = valgrind_path;
switch (valgrind_tool)
{
case 1: dst[i++] = "--tool=massif"; break;
case 2: dst[i++] = "--tool=callgrind"; break;
}
return i;
}
if (valgrind_gdbserver) dst[i++] = "--db-attach=yes";
if (!valgrind_mode) return 0;
dst[i++] = valgrind_path;
dst[i++] = "--num-callers=40";
dst[i++] = "--track-origins=yes";
dst[i++] = "--malloc-fill=13"; /* invalid pointer, make it crash */
if (valgrind_log)
{
static char logparam[PATH_MAX + sizeof("--log-file=")];
snprintf(logparam, sizeof(logparam), "--log-file=%s", valgrind_log);
dst[i++] = logparam;
}
if (valgrind_mode & 2) dst[i++] = "--trace-children=yes";
if (valgrind_mode & 4)
{
dst[i++] = "--leak-check=full";
dst[i++] = "--leak-resolution=high";
dst[i++] = "--track-fds=yes";
}
if (valgrind_mode & 8) dst[i++] = "--show-reachable=yes";
return i;
}
| 0 | CVE-2014-1846 | CWE-264 |
void SoftwareFrameManager::EvictCurrentFrame() {
DCHECK(HasCurrentFrame());
DiscardCurrentFrame();
if (client_)
client_->ReleaseReferencesToSoftwareFrame();
}
| 0 | null | null |
PrintDialogGtk::~PrintDialogGtk() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
if (dialog_) {
gtk_widget_destroy(dialog_);
dialog_ = NULL;
}
if (gtk_settings_) {
g_object_unref(gtk_settings_);
gtk_settings_ = NULL;
}
if (page_setup_) {
g_object_unref(page_setup_);
page_setup_ = NULL;
}
if (printer_) {
g_object_unref(printer_);
printer_ = NULL;
}
}
| 0 | CVE-2011-3897 | CWE-399 |
WORD32 ih264d_end_of_pic_dispbuf_mgr(dec_struct_t * ps_dec)
{
dec_slice_params_t *ps_cur_slice = ps_dec->ps_cur_slice;
UWORD8 u1_num_of_users = 0;
WORD32 ret;
H264_MUTEX_LOCK(&ps_dec->process_disp_mutex);
if(1)
{
{
ih264d_delete_nonref_nondisplay_pics(ps_dec->ps_dpb_mgr);
if(ps_cur_slice->u1_mmco_equalto5
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
ps_dec->ps_cur_pic->i4_poc = 0;
if(ps_dec->u2_total_mbs_coded
== (ps_dec->ps_cur_sps->u2_max_mb_addr + 1))
ih264d_reset_ref_bufs(ps_dec->ps_dpb_mgr);
ih264d_release_display_bufs(ps_dec);
}
if(IVD_DECODE_FRAME_OUT != ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
if(ps_cur_slice->u1_nal_ref_idc)
{
/* Mark pic buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_REF);
/* Mark mv buf as needed for reference */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 1;
}
/* 420 consumer */
/* Increment the number of users by 1 for display based upon */
/*the SEEK KEY FRAME control sent to decoder */
if(((0 == ps_dec->u1_last_pic_not_decoded)
&& (0
== (ps_dec->ps_cur_pic->u4_pack_slc_typ
& ps_dec->u4_skip_frm_mask)))
|| (ps_cur_slice->u1_nal_unit_type == IDR_SLICE_NAL))
{
/* Mark pic buf as needed for display */
ih264_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mgr,
ps_dec->u1_pic_buf_id,
BUF_MGR_IO);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
!= ps_dec->u1_top_bottom_decoded))
{
pic_buffer_t *ps_cur_pic = ps_dec->ps_cur_pic;
ps_cur_pic->u2_disp_width = ps_dec->u2_disp_width;
ps_cur_pic->u2_disp_height = ps_dec->u2_disp_height >> 1;
ps_cur_pic->u2_crop_offset_y = ps_dec->u2_crop_offset_y;
ps_cur_pic->u2_crop_offset_uv = ps_dec->u2_crop_offset_uv;
ps_cur_pic->u1_pic_type = 0;
ret = ih264d_insert_pic_in_display_list(
ps_dec->ps_dpb_mgr,
ps_dec->u1_pic_buf_id,
ps_dec->i4_prev_max_display_seq
+ ps_dec->ps_cur_pic->i4_poc,
ps_dec->ps_cur_pic->i4_frame_num);
if(ret != OK)
return ret;
{
ivd_video_decode_op_t * ps_dec_output =
(ivd_video_decode_op_t *)ps_dec->pv_dec_out;
ps_dec_output->u4_frame_decoded_flag = 1;
}
if(ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] == 0)
{
ih264_buf_mgr_release((buf_mgr_t *)ps_dec->pv_mv_buf_mgr,
ps_dec->au1_pic_buf_id_mv_buf_id_map[ps_dec->u1_pic_buf_id],
BUF_MGR_REF);
ps_dec->au1_pic_buf_ref_flag[ps_dec->u1_pic_buf_id] = 0;
}
}
else
{
H264_DEC_DEBUG_PRINT("pic not inserted display %d %d\n",
ps_cur_slice->u1_field_pic_flag,
ps_dec->u1_second_field);
}
if(!ps_cur_slice->u1_field_pic_flag
|| ((TOP_FIELD_ONLY | BOT_FIELD_ONLY)
== ps_dec->u1_top_bottom_decoded))
{
if(IVD_DECODE_FRAME_OUT == ps_dec->e_frm_out_mode)
{
ret = ih264d_assign_display_seq(ps_dec);
if(ret != OK)
return ret;
}
}
}
H264_MUTEX_UNLOCK(&ps_dec->process_disp_mutex);
return OK;
}
| 0 | CVE-2016-3829 | CWE-172 |
static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
char *page)
{
struct vhost_scsi_tpg *tpg = container_of(se_tpg,
struct vhost_scsi_tpg, se_tpg);
struct vhost_scsi_nexus *tv_nexus;
ssize_t ret;
mutex_lock(&tpg->tv_tpg_mutex);
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
return -ENODEV;
}
ret = snprintf(page, PAGE_SIZE, "%s\n",
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
mutex_unlock(&tpg->tv_tpg_mutex);
return ret;
}
| 0 | CVE-2015-4036 | CWE-119 |
void net_ns_barrier(void)
{
mutex_lock(&net_mutex);
mutex_unlock(&net_mutex);
}
| 0 | CVE-2017-15129 | CWE-416 |
void TestLoadTimingNotReusedWithPac(const net::LoadTimingInfo& load_timing_info,
int connect_timing_flags) {
EXPECT_FALSE(load_timing_info.socket_reused);
EXPECT_NE(net::NetLog::Source::kInvalidId, load_timing_info.socket_log_id);
EXPECT_FALSE(load_timing_info.proxy_resolve_start.is_null());
EXPECT_LE(load_timing_info.proxy_resolve_start,
load_timing_info.proxy_resolve_end);
EXPECT_LE(load_timing_info.proxy_resolve_end,
load_timing_info.connect_timing.connect_start);
net::ExpectConnectTimingHasTimes(load_timing_info.connect_timing,
connect_timing_flags);
EXPECT_LE(load_timing_info.connect_timing.connect_end,
load_timing_info.send_start);
EXPECT_LE(load_timing_info.send_start, load_timing_info.send_end);
EXPECT_TRUE(load_timing_info.request_start_time.is_null());
EXPECT_TRUE(load_timing_info.request_start.is_null());
EXPECT_TRUE(load_timing_info.receive_headers_end.is_null());
}
| 0 | CVE-2015-1229 | CWE-19 |
void HttpBridge::MakeAsynchronousPost() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
base::AutoLock lock(fetch_state_lock_);
DCHECK(!fetch_state_.request_completed);
if (fetch_state_.aborted)
return;
fetch_state_.url_poster = new URLFetcher(url_for_request_,
URLFetcher::POST, this);
fetch_state_.url_poster->set_request_context(context_getter_for_request_);
fetch_state_.url_poster->set_upload_data(content_type_, request_content_);
fetch_state_.url_poster->set_extra_request_headers(extra_headers_);
fetch_state_.url_poster->set_load_flags(net::LOAD_DO_NOT_SEND_COOKIES);
fetch_state_.url_poster->Start();
}
| 1 | CVE-2011-2793 | CWE-399 |
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
struct kernel_lb_addr *root)
{
struct fileSetDesc *fset;
fset = (struct fileSetDesc *)bh->b_data;
*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
udf_debug("Rootdir at block=%d, partition=%d\n",
root->logicalBlockNum, root->partitionReferenceNum);
}
| 0 | CVE-2012-3400 | CWE-119 |
Response InspectorPageAgent::addScriptToEvaluateOnLoad(const String& source,
String* identifier) {
protocol::DictionaryValue* scripts =
state_->getObject(PageAgentState::kPageAgentScriptsToEvaluateOnLoad);
if (!scripts) {
std::unique_ptr<protocol::DictionaryValue> new_scripts =
protocol::DictionaryValue::create();
scripts = new_scripts.get();
state_->setObject(PageAgentState::kPageAgentScriptsToEvaluateOnLoad,
std::move(new_scripts));
}
do {
*identifier = String::Number(++last_script_identifier_);
} while (scripts->get(*identifier));
scripts->setString(*identifier, source);
return Response::OK();
}
| 0 | CVE-2017-5009 | CWE-119 |
PHP_METHOD(HttpParams, offsetGet)
{
char *name_str;
int name_len;
zval **zparam, *zparams;
if (SUCCESS != zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name_str, &name_len)) {
return;
}
zparams = php_http_ztyp(IS_ARRAY, zend_read_property(php_http_params_class_entry, getThis(), ZEND_STRL("params"), 0 TSRMLS_CC));
if (SUCCESS == zend_symtable_find(Z_ARRVAL_P(zparams), name_str, name_len + 1, (void *) &zparam)) {
RETVAL_ZVAL(*zparam, 1, 0);
}
zval_ptr_dtor(&zparams);
}
| 0 | CVE-2016-7398 | CWE-704 |
int PDFiumEngine::GetMostVisiblePage() {
if (in_flight_visible_page_)
return *in_flight_visible_page_;
base::AutoReset<bool> defer_page_unload_guard(&defer_page_unload_, true);
CalculateVisiblePages();
return most_visible_page_;
}
| 0 | CVE-2018-6031 | CWE-416 |
RenderProcessHost* Wait(base::TimeDelta timeout = base::TimeDelta::Max()) {
if (!captured_render_process_host_) {
base::OneShotTimer timer;
timer.Start(FROM_HERE, timeout, run_loop_.QuitClosure());
run_loop_.Run();
timer.Stop();
}
return captured_render_process_host_;
}
| 0 | CVE-2018-18349 | CWE-732 |
RenderViewHostImpl* RenderViewHostManager::Navigate(
const NavigationEntryImpl& entry) {
RenderViewHostImpl* dest_render_view_host =
static_cast<RenderViewHostImpl*>(UpdateRendererStateForNavigate(entry));
if (!dest_render_view_host)
return NULL; // We weren't able to create a pending render view host.
if (dest_render_view_host != render_view_host_ &&
!render_view_host_->IsRenderViewLive()) {
delegate_->CreateRenderViewForRenderManager(render_view_host_,
MSG_ROUTING_NONE);
}
if (!dest_render_view_host->IsRenderViewLive()) {
if (!InitRenderView(dest_render_view_host, MSG_ROUTING_NONE))
return NULL;
if (dest_render_view_host != render_view_host_ &&
dest_render_view_host->GetView()) {
dest_render_view_host->GetView()->Hide();
} else {
RenderViewHost* null_rvh = NULL;
std::pair<RenderViewHost*, RenderViewHost*> details =
std::make_pair(null_rvh, render_view_host_);
NotificationService::current()->Notify(
NOTIFICATION_RENDER_VIEW_HOST_CHANGED,
Source<NavigationController>(
&delegate_->GetControllerForRenderManager()),
Details<std::pair<RenderViewHost*, RenderViewHost*> >(
&details));
}
}
return dest_render_view_host;
}
| 0 | CVE-2013-0921 | CWE-264 |
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct page *page;
u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
nested_vmcb->control.exit_code_hi = 0;
nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0;
nested_svm_unmap(page);
return false;
}
trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
nested_vmcb->save.rip,
nested_vmcb->control.int_ctl,
nested_vmcb->control.event_inj,
nested_vmcb->control.nested_ctl);
trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
nested_vmcb->control.intercept_cr >> 16,
nested_vmcb->control.intercept_exceptions,
nested_vmcb->control.intercept);
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
/*
* Save the old vmcb, so we don't need to pick what we save, but can
* restore everything when a VMEXIT occurs
*/
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
hsave->save.efer = svm->vcpu.arch.efer;
hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
hsave->save.cr4 = svm->vcpu.arch.cr4;
hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
hsave->save.rip = kvm_rip_read(&svm->vcpu);
hsave->save.rsp = vmcb->save.rsp;
hsave->save.rax = vmcb->save.rax;
if (npt_enabled)
hsave->save.cr3 = vmcb->save.cr3;
else
hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
copy_vmcb_control_area(hsave, vmcb);
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
if (nested_vmcb->control.nested_ctl) {
kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
}
/* Load the nested guest state */
svm->vmcb->save.es = nested_vmcb->save.es;
svm->vmcb->save.cs = nested_vmcb->save.cs;
svm->vmcb->save.ss = nested_vmcb->save.ss;
svm->vmcb->save.ds = nested_vmcb->save.ds;
svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
svm->vmcb->save.idtr = nested_vmcb->save.idtr;
kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
} else
(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
/* Guest paging mode is active - reset mmu */
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
svm->vmcb->save.rip = nested_vmcb->save.rip;
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
svm->nested.intercept = nested_vmcb->control.intercept;
svm_flush_tlb(&svm->vcpu);
svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
/* We only want the cr8 intercept bits of the guest */
clr_cr_intercept(svm, INTERCEPT_CR8_READ);
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
nested_svm_unmap(page);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
* guest-mode to take affect here
*/
recalc_intercepts(svm);
svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
mark_all_dirty(svm->vmcb);
return true;
}
| 0 | CVE-2014-3610 | CWE-264 |
void ikev1_echo_hdr(struct msg_digest *md, bool enc, u_int8_t np)
{
struct isakmp_hdr hdr = md->hdr; /* mostly same as incoming header */
/* make sure we start with a clean buffer */
init_out_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer),
"reply packet");
hdr.isa_flags = 0; /* zero all flags */
if (enc)
hdr.isa_flags |= ISAKMP_FLAGS_v1_ENCRYPTION;
if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
hdr.isa_flags |= ISAKMP_FLAGS_RESERVED_BIT6;
}
/* there is only one IKEv1 version, and no new one will ever come - no need to set version */
hdr.isa_np = np;
if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody))
impossible(); /* surely must have room and be well-formed */
}
| 0 | CVE-2016-5361 | CWE-20 |
void Bluetooth::CancelScan(mojo::BindingId id) {
client_bindings_.RemoveBinding(id);
}
| 0 | CVE-2017-5044 | CWE-119 |
modbus_mapping_t* modbus_mapping_new_start_address(
unsigned int start_bits, unsigned int nb_bits,
unsigned int start_input_bits, unsigned int nb_input_bits,
unsigned int start_registers, unsigned int nb_registers,
unsigned int start_input_registers, unsigned int nb_input_registers)
{
modbus_mapping_t *mb_mapping;
mb_mapping = (modbus_mapping_t *)malloc(sizeof(modbus_mapping_t));
if (mb_mapping == NULL) {
return NULL;
}
/* 0X */
mb_mapping->nb_bits = nb_bits;
mb_mapping->start_bits = start_bits;
if (nb_bits == 0) {
mb_mapping->tab_bits = NULL;
} else {
/* Negative number raises a POSIX error */
mb_mapping->tab_bits =
(uint8_t *) malloc(nb_bits * sizeof(uint8_t));
if (mb_mapping->tab_bits == NULL) {
free(mb_mapping);
return NULL;
}
memset(mb_mapping->tab_bits, 0, nb_bits * sizeof(uint8_t));
}
/* 1X */
mb_mapping->nb_input_bits = nb_input_bits;
mb_mapping->start_input_bits = start_input_bits;
if (nb_input_bits == 0) {
mb_mapping->tab_input_bits = NULL;
} else {
mb_mapping->tab_input_bits =
(uint8_t *) malloc(nb_input_bits * sizeof(uint8_t));
if (mb_mapping->tab_input_bits == NULL) {
free(mb_mapping->tab_bits);
free(mb_mapping);
return NULL;
}
memset(mb_mapping->tab_input_bits, 0, nb_input_bits * sizeof(uint8_t));
}
/* 4X */
mb_mapping->nb_registers = nb_registers;
mb_mapping->start_registers = start_registers;
if (nb_registers == 0) {
mb_mapping->tab_registers = NULL;
} else {
mb_mapping->tab_registers =
(uint16_t *) malloc(nb_registers * sizeof(uint16_t));
if (mb_mapping->tab_registers == NULL) {
free(mb_mapping->tab_input_bits);
free(mb_mapping->tab_bits);
free(mb_mapping);
return NULL;
}
memset(mb_mapping->tab_registers, 0, nb_registers * sizeof(uint16_t));
}
/* 3X */
mb_mapping->nb_input_registers = nb_input_registers;
mb_mapping->start_input_registers = start_input_registers;
if (nb_input_registers == 0) {
mb_mapping->tab_input_registers = NULL;
} else {
mb_mapping->tab_input_registers =
(uint16_t *) malloc(nb_input_registers * sizeof(uint16_t));
if (mb_mapping->tab_input_registers == NULL) {
free(mb_mapping->tab_registers);
free(mb_mapping->tab_input_bits);
free(mb_mapping->tab_bits);
free(mb_mapping);
return NULL;
}
memset(mb_mapping->tab_input_registers, 0,
nb_input_registers * sizeof(uint16_t));
}
return mb_mapping;
}
| 0 | CVE-2019-14463 | CWE-125 |
void ChromeContentBrowserClient::MaybeCopyDisableWebRtcEncryptionSwitch(
base::CommandLine* to_command_line,
const base::CommandLine& from_command_line,
version_info::Channel channel) {
#if defined(OS_ANDROID)
const version_info::Channel kMaxDisableEncryptionChannel =
version_info::Channel::BETA;
#else
const version_info::Channel kMaxDisableEncryptionChannel =
version_info::Channel::DEV;
#endif
if (channel <= kMaxDisableEncryptionChannel) {
static const char* const kWebRtcDevSwitchNames[] = {
switches::kDisableWebRtcEncryption,
};
to_command_line->CopySwitchesFrom(from_command_line,
kWebRtcDevSwitchNames,
arraysize(kWebRtcDevSwitchNames));
}
}
| 0 | CVE-2019-5779 | CWE-264 |
static int _nfs41_test_stateid(struct nfs_server *server, struct nfs4_state *state)
{
int status;
struct nfs41_test_stateid_args args = {
.stateid = &state->stateid,
};
struct nfs41_test_stateid_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
.rpc_argp = &args,
.rpc_resp = &res,
};
args.seq_args.sa_session = res.seq_res.sr_session = NULL;
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 0, 1);
return status;
}
| 0 | CVE-2011-4131 | CWE-189 |
vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
struct config_group *group,
const char *name)
{
struct se_node_acl *se_nacl, *se_nacl_new;
struct vhost_scsi_nacl *nacl;
u64 wwpn = 0;
u32 nexus_depth;
/* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */
se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
if (!se_nacl_new)
return ERR_PTR(-ENOMEM);
nexus_depth = 1;
/*
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
* when converting a NodeACL from demo mode -> explict
*/
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
name, nexus_depth);
if (IS_ERR(se_nacl)) {
vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
return se_nacl;
}
/*
* Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
*/
nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
nacl->iport_wwpn = wwpn;
return se_nacl;
}
| 0 | CVE-2015-4036 | CWE-119 |
Gfx::~Gfx() {
while (stateGuards.size()) {
popStateGuard();
}
while (state->hasSaves()) {
error(-1, "Found state under last state guard. Popping.");
restoreState();
}
if (!subPage) {
out->endPage();
}
while (res) {
popResources();
}
if (state) {
delete state;
}
while (mcStack) {
popMarkedContent();
}
}
| 0 | CVE-2010-3702 | CWE-20 |
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
if (!route_frame) spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (node->neighbour[i]->restarted) {
res = node->neighbour[i];
goto out;
}
}
}
}
if (!route_frame) { /* connect request */
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
failed = 0;
goto out;
}
failed = 1;
}
}
}
}
if (failed) {
*cause = ROSE_OUT_OF_ORDER;
*diagnostic = 0;
} else {
*cause = ROSE_NOT_OBTAINABLE;
*diagnostic = 0;
}
out:
if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
| 0 | CVE-2011-4914 | CWE-20 |
static int ssl3_check_client_certificate(SSL *s)
{
unsigned long alg_k;
if (!s->cert || !s->cert->key->x509 || !s->cert->key->privatekey)
return 0;
/* If no suitable signature algorithm can't use certificate */
if (SSL_USE_SIGALGS(s) && !s->cert->key->digest)
return 0;
/*
* If strict mode check suitability of chain before using it. This also
* adjusts suite B digest if necessary.
*/
if (s->cert->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT &&
!tls1_check_chain(s, NULL, NULL, NULL, -2))
return 0;
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
/* See if we can use client certificate for fixed DH */
if (alg_k & (SSL_kDHr | SSL_kDHd)) {
SESS_CERT *scert = s->session->sess_cert;
int i = scert->peer_cert_type;
EVP_PKEY *clkey = NULL, *spkey = NULL;
clkey = s->cert->key->privatekey;
/* If client key not DH assume it can be used */
if (EVP_PKEY_id(clkey) != EVP_PKEY_DH)
return 1;
if (i >= 0)
spkey = X509_get_pubkey(scert->peer_pkeys[i].x509);
if (spkey) {
/* Compare server and client parameters */
i = EVP_PKEY_cmp_parameters(clkey, spkey);
EVP_PKEY_free(spkey);
if (i != 1)
return 0;
}
s->s3->flags |= TLS1_FLAGS_SKIP_CERT_VERIFY;
}
return 1;
}
| 0 | CVE-2015-3196 | CWE-362 |
static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */
{
spl_array_it *iterator = (spl_array_it *)iter;
spl_array_object *object = iterator->object;
HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC);
if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) {
zend_user_it_move_forward(iter TSRMLS_CC);
} else {
zend_user_it_invalidate_current(iter TSRMLS_CC);
if (!aht) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array");
return;
}
if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid");
} else {
spl_array_next_no_verify(object, aht TSRMLS_CC);
}
}
}
/* }}} */
| 0 | CVE-2016-7417 | CWE-20 |
static int ssdp_init(int in, char *iflist[], size_t num)
{
int modified;
size_t i;
struct ifaddrs *ifaddrs, *ifa;
logit(LOG_INFO, "Updating interfaces ...");
if (getifaddrs(&ifaddrs) < 0) {
logit(LOG_ERR, "Failed getifaddrs(): %s", strerror(errno));
return -1;
}
/* Mark all outbound interfaces as stale */
mark();
/* First pass, clear stale marker from exact matches */
for (ifa = ifaddrs; ifa; ifa = ifa->ifa_next) {
struct ifsock *ifs;
/* Do we already have it? */
ifs = find_iface(ifa->ifa_addr);
if (ifs) {
ifs->stale = 0;
continue;
}
}
/* Clean out any stale interface addresses */
modified = sweep();
/* Second pass, add new ones */
for (ifa = ifaddrs; ifa; ifa = ifa->ifa_next) {
int sd;
/* Interface filtering, optional command line argument */
if (filter_iface(ifa->ifa_name, iflist, num)) {
logit(LOG_DEBUG, "Skipping %s, not in iflist.", ifa->ifa_name);
continue;
}
/* Do we have another in the same subnet? */
if (filter_addr(ifa->ifa_addr))
continue;
sd = open_socket(ifa->ifa_name, ifa->ifa_addr, MC_SSDP_PORT);
if (sd < 0)
continue;
multicast_join(in, ifa->ifa_addr);
if (register_socket(in, sd, ifa->ifa_addr, ifa->ifa_netmask, ssdp_recv)) {
close(sd);
break;
}
modified++;
}
freeifaddrs(ifaddrs);
return modified;
}
| 0 | CVE-2019-14323 | CWE-119 |
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
void __user *arg, int subvol)
{
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
u64 transid = 0;
u64 *ptr = NULL;
bool readonly = false;
struct btrfs_qgroup_inherit *inherit = NULL;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
if (vol_args->flags &
~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
BTRFS_SUBVOL_QGROUP_INHERIT)) {
ret = -EOPNOTSUPP;
goto out;
}
if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
ptr = &transid;
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
if (vol_args->size > PAGE_CACHE_SIZE) {
ret = -EINVAL;
goto out;
}
inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
if (IS_ERR(inherit)) {
ret = PTR_ERR(inherit);
goto out;
}
}
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
readonly, &inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
offsetof(struct btrfs_ioctl_vol_args_v2,
transid), ptr, sizeof(*ptr)))
ret = -EFAULT;
out:
kfree(vol_args);
kfree(inherit);
return ret;
}
| 0 | CVE-2012-5375 | CWE-310 |
static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
const union sctp_addr *laddr,
struct sctp_transport **transportp)
{
struct sctp_association *asoc = NULL;
sctp_chunkhdr_t *ch;
int have_auth = 0;
unsigned int chunk_num = 1;
__u8 *ch_end;
/* Walk through the chunks looking for AUTH or ASCONF chunks
* to help us find the association.
*/
ch = (sctp_chunkhdr_t *) skb->data;
do {
/* Break out if chunk length is less then minimal. */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
break;
switch(ch->type) {
case SCTP_CID_AUTH:
have_auth = chunk_num;
break;
case SCTP_CID_COOKIE_ECHO:
/* If a packet arrives containing an AUTH chunk as
* a first chunk, a COOKIE-ECHO chunk as the second
* chunk, and possibly more chunks after them, and
* the receiver does not have an STCB for that
* packet, then authentication is based on
* the contents of the COOKIE- ECHO chunk.
*/
if (have_auth == 1 && chunk_num == 2)
return NULL;
break;
case SCTP_CID_ASCONF:
if (have_auth || sctp_addip_noauth)
asoc = __sctp_rcv_asconf_lookup(ch, laddr,
sctp_hdr(skb)->source,
transportp);
default:
break;
}
if (asoc)
break;
ch = (sctp_chunkhdr_t *) ch_end;
chunk_num++;
} while (ch_end < skb_tail_pointer(skb));
return asoc;
}
| 0 | CVE-2011-4348 | CWE-362 |
void CL_InitDownloads( void ) {
#ifndef PRE_RELEASE_DEMO
char missingfiles[1024];
char *dir = FS_ShiftStr( AUTOUPDATE_DIR, AUTOUPDATE_DIR_SHIFT );
if ( autoupdateStarted && NET_CompareAdr( cls.autoupdateServer, clc.serverAddress ) ) {
if ( strlen( cl_updatefiles->string ) > 4 ) {
Q_strncpyz( autoupdateFilename, cl_updatefiles->string, sizeof( autoupdateFilename ) );
Q_strncpyz( clc.downloadList, va( "@%s/%s@%s/%s", dir, cl_updatefiles->string, dir, cl_updatefiles->string ), MAX_INFO_STRING );
clc.state = CA_CONNECTED;
CL_NextDownload();
return;
}
} else {
if ( !(cl_allowDownload->integer & DLF_ENABLE) ) {
if ( FS_ComparePaks( missingfiles, sizeof( missingfiles ), qfalse ) ) {
Cvar_Set( "com_missingFiles", missingfiles );
} else {
Cvar_Set( "com_missingFiles", "" );
}
Com_Printf( "\nWARNING: You are missing some files referenced by the server:\n%s"
"You might not be able to join the game\n"
"Go to the setting menu to turn on autodownload, or get the file elsewhere\n\n", missingfiles );
}
else if ( FS_ComparePaks( clc.downloadList, sizeof( clc.downloadList ), qtrue ) ) {
Com_Printf( CL_TranslateStringBuf( "Need paks: %s\n" ), clc.downloadList );
if ( *clc.downloadList ) {
clc.state = CA_CONNECTED;
*clc.downloadTempName = *clc.downloadName = 0;
Cvar_Set( "cl_downloadName", "" );
CL_NextDownload();
return;
}
}
}
#endif
CL_DownloadsComplete();
}
| 0 | CVE-2017-6903 | CWE-269 |
nodelist_free_all(void)
{
if (PREDICT_UNLIKELY(the_nodelist == NULL))
return;
HT_CLEAR(nodelist_map, &the_nodelist->nodes_by_id);
SMARTLIST_FOREACH_BEGIN(the_nodelist->nodes, node_t *, node) {
node->nodelist_idx = -1;
node_free(node);
} SMARTLIST_FOREACH_END(node);
smartlist_free(the_nodelist->nodes);
tor_free(the_nodelist);
}
| 0 | CVE-2017-0377 | CWE-200 |
void V8TestObject::FloatMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
RUNTIME_CALL_TIMER_SCOPE_DISABLED_BY_DEFAULT(info.GetIsolate(), "Blink_TestObject_floatMethod");
test_object_v8_internal::FloatMethodMethod(info);
}
| 0 | CVE-2017-5120 | null |
e1000e_set_status(E1000ECore *core, int index, uint32_t val)
{
if ((val & E1000_STATUS_PHYRA) == 0) {
core->mac[index] &= ~E1000_STATUS_PHYRA;
}
}
| 0 | CVE-2017-9310 | CWE-835 |
PassRefPtrWillBeRawPtr<Text> Document::createEditingTextNode(const String& text)
{
return Text::createEditingText(*this, text);
}
| 0 | CVE-2015-6768 | CWE-264 |
static int unsupported_configure_endpoints(int sub_api, struct libusb_device_handle *dev_handle, int iface) {
PRINT_UNSUPPORTED_API(configure_endpoints);
}
| 0 | CVE-2018-6125 | null |
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
"vmcs_clear_bits does not support 64-bit fields");
if (static_branch_unlikely(&enable_evmcs))
return evmcs_write32(field, evmcs_read32(field) & ~mask);
__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}
| 0
|
CVE-2018-12904
| null |
void Document::detachNodeIterator(NodeIterator* ni)
{
m_nodeIterators.remove(ni);
}
| 0
|
CVE-2012-5136
|
CWE-20
|
static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
int r = 0;
DEFINE_WAIT(wait);
while (1) {
prepare_to_wait(&md->wait, &wait, task_state);
if (!md_in_flight(md))
break;
if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
io_schedule();
}
finish_wait(&md->wait, &wait);
return r;
}
| 0
|
CVE-2017-18203
|
CWE-362
|
String pathGetFileName(const String& path)
{
return String(::PathFindFileName(String(path).charactersWithNullTermination()));
}
| 0
|
CVE-2012-2875
| null |
irc_server_free_data (struct t_irc_server *server)
{
int i;
if (!server)
return;
/* free data */
for (i = 0; i < IRC_SERVER_NUM_OPTIONS; i++)
{
if (server->options[i])
weechat_config_option_free (server->options[i]);
}
if (server->name)
free (server->name);
if (server->addresses_array)
weechat_string_free_split (server->addresses_array);
if (server->ports_array)
free (server->ports_array);
if (server->current_address)
free (server->current_address);
if (server->current_ip)
free (server->current_ip);
if (server->hook_connect)
weechat_unhook (server->hook_connect);
if (server->hook_fd)
weechat_unhook (server->hook_fd);
if (server->hook_timer_connection)
weechat_unhook (server->hook_timer_connection);
if (server->hook_timer_sasl)
weechat_unhook (server->hook_timer_sasl);
if (server->unterminated_message)
free (server->unterminated_message);
if (server->nicks_array)
weechat_string_free_split (server->nicks_array);
if (server->nick)
free (server->nick);
if (server->nick_modes)
free (server->nick_modes);
if (server->isupport)
free (server->isupport);
if (server->prefix_modes)
free (server->prefix_modes);
if (server->prefix_chars)
free (server->prefix_chars);
if (server->away_message)
free (server->away_message);
if (server->cmd_list_regexp)
{
regfree (server->cmd_list_regexp);
free (server->cmd_list_regexp);
}
for (i = 0; i < IRC_SERVER_NUM_OUTQUEUES_PRIO; i++)
{
irc_server_outqueue_free_all (server, i);
}
irc_notify_free_all (server);
irc_redirect_free_all (server);
if (server->channels)
irc_channel_free_all (server);
if (server->buffer_as_string)
free (server->buffer_as_string);
}
| 0
|
CVE-2011-1428
|
CWE-20
|
void ProcessControlLaunched() {
base::ScopedAllowBlockingForTesting allow_blocking;
base::ProcessId service_pid;
EXPECT_TRUE(GetServiceProcessData(NULL, &service_pid));
EXPECT_NE(static_cast<base::ProcessId>(0), service_pid);
#if defined(OS_WIN)
service_process_ =
base::Process::OpenWithAccess(service_pid,
SYNCHRONIZE | PROCESS_QUERY_INFORMATION);
#else
service_process_ = base::Process::Open(service_pid);
#endif
EXPECT_TRUE(service_process_.IsValid());
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::RunLoop::QuitCurrentWhenIdleClosureDeprecated());
}
| 1
|
CVE-2016-5149
|
CWE-94
|
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = sk->sk_v6_daddr;
sin6->sin6_port = inet_sk(sk)->inet_dport;
/* We do not store received flowlabel for TCP */
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
sk->sk_bound_dev_if);
}
| 0
|
CVE-2016-3841
|
CWE-416
|
AdjustWaitForDelay (pointer waitTime, unsigned long newdelay)
{
static struct timeval delay_val;
struct timeval **wt = (struct timeval **) waitTime;
unsigned long olddelay;
if (*wt == NULL)
{
delay_val.tv_sec = newdelay / 1000;
delay_val.tv_usec = 1000 * (newdelay % 1000);
*wt = &delay_val;
}
else
{
olddelay = (*wt)->tv_sec * 1000 + (*wt)->tv_usec / 1000;
if (newdelay < olddelay)
{
(*wt)->tv_sec = newdelay / 1000;
(*wt)->tv_usec = 1000 * (newdelay % 1000);
}
}
}
| 0
|
CVE-2011-4029
|
CWE-362
|
bool HttpResponseHeaders::GetAgeValue(TimeDelta* result) const {
std::string value;
if (!EnumerateHeader(NULL, "Age", &value))
return false;
int64 seconds;
base::StringToInt64(value, &seconds);
*result = TimeDelta::FromSeconds(seconds);
return true;
}
| 0
| null | null |
bool Extension::InitFromValue(int flags, string16* error) {
DCHECK(error);
base::AutoLock auto_lock(runtime_data_lock_);
runtime_data_.SetActivePermissions(new PermissionSet());
optional_permission_set_ = new PermissionSet();
required_permission_set_ = new PermissionSet();
creation_flags_ = flags;
if (!LoadManifestVersion(error))
return false;
if (!CheckMinimumChromeVersion(error))
return false;
if (!LoadRequiredFeatures(error))
return false;
manifest_->GetString(keys::kPublicKey, &public_key_);
extension_url_ = Extension::GetBaseURLFromExtensionId(id());
if (is_app() && !LoadAppFeatures(error))
return false;
APIPermissionSet api_permissions;
URLPatternSet host_permissions;
if (!ParsePermissions(keys::kPermissions,
error,
&api_permissions,
&host_permissions)) {
return false;
}
for (APIPermissionSet::const_iterator i = api_permissions.begin();
i != api_permissions.end(); ++i) {
if ((*i)->info()->must_be_optional()) {
*error = ErrorUtils::FormatErrorMessageUTF16(
errors::kPermissionMustBeOptional, (*i)->info()->name());
return false;
}
}
if (is_platform_app()) {
api_permissions.insert(APIPermission::kAppCurrentWindowInternal);
api_permissions.insert(APIPermission::kAppRuntime);
api_permissions.insert(APIPermission::kAppWindow);
}
APIPermissionSet optional_api_permissions;
URLPatternSet optional_host_permissions;
if (!ParsePermissions(keys::kOptionalPermissions,
error,
&optional_api_permissions,
&optional_host_permissions)) {
return false;
}
if (ContainsManifestForbiddenPermission(api_permissions, error) ||
ContainsManifestForbiddenPermission(optional_api_permissions, error)) {
return false;
}
if (!LoadAppIsolation(api_permissions, error))
return false;
if (!LoadSharedFeatures(api_permissions, error))
return false;
if (!LoadExtensionFeatures(&api_permissions, error))
return false;
if (!LoadThemeFeatures(error))
return false;
if (!LoadManagedModeFeatures(error))
return false;
if (HasMultipleUISurfaces()) {
*error = ASCIIToUTF16(errors::kOneUISurfaceOnly);
return false;
}
finished_parsing_manifest_ = true;
runtime_data_.SetActivePermissions(new PermissionSet(
this, api_permissions, host_permissions));
required_permission_set_ = new PermissionSet(
this, api_permissions, host_permissions);
optional_permission_set_ = new PermissionSet(
optional_api_permissions, optional_host_permissions, URLPatternSet());
return true;
}
| 0
|
CVE-2013-0885
|
CWE-264
|
int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
return 0;
}
| 0
|
CVE-2014-4656
|
CWE-189
|
unsigned int dictObjHash(const void *key) {
const robj *o = key;
return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
}
| 0
|
CVE-2016-10517
|
CWE-254
|
void RenderMediaClient::AddKeySystemsInfoForUMA(
std::vector<media::KeySystemInfoForUMA>* key_systems_info_for_uma) {
DVLOG(2) << __func__;
#if defined(WIDEVINE_CDM_AVAILABLE)
key_systems_info_for_uma->push_back(media::KeySystemInfoForUMA(
kWidevineKeySystem, kWidevineKeySystemNameForUMA));
#endif // WIDEVINE_CDM_AVAILABLE
}
| 0
| null | null |
void TabStrip::Init() {
SetID(VIEW_ID_TAB_STRIP);
set_notify_enter_exit_on_child(true);
new_tab_button_ = new NewTabButton(this, this);
new_tab_button_->SetTooltipText(
l10n_util::GetStringUTF16(IDS_TOOLTIP_NEW_TAB));
new_tab_button_->SetAccessibleName(
l10n_util::GetStringUTF16(IDS_ACCNAME_NEWTAB));
new_tab_button_->SetImageVerticalAlignment(views::ImageButton::ALIGN_BOTTOM);
new_tab_button_->SetEventTargeter(
std::make_unique<views::ViewTargeter>(new_tab_button_));
AddChildView(new_tab_button_);
UpdateNewTabButtonBorder();
new_tab_button_bounds_.set_size(new_tab_button_->GetPreferredSize());
if (g_drop_indicator_width == 0) {
gfx::ImageSkia* drop_image = GetDropArrowImage(true);
g_drop_indicator_width = drop_image->width();
g_drop_indicator_height = drop_image->height();
}
UpdateContrastRatioValues();
if (!gfx::Animation::ShouldRenderRichAnimation())
bounds_animator_.SetAnimationDuration(0);
}
| 0
|
CVE-2016-5218
|
CWE-20
|
void free_user_ns(struct user_namespace *ns)
{
struct user_namespace *parent;
do {
parent = ns->parent;
proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
} while (atomic_dec_and_test(&parent->count));
}
| 0
|
CVE-2013-1959
|
CWE-264
|
static inline void mntfree(struct mount *mnt)
{
struct vfsmount *m = &mnt->mnt;
struct super_block *sb = m->mnt_sb;
/*
* This probably indicates that somebody messed
* up a mnt_want/drop_write() pair. If this
* happens, the filesystem was probably unable
* to make r/w->r/o transitions.
*/
/*
* The locking used to deal with mnt_count decrement provides barriers,
* so mnt_get_writers() below is safe.
*/
WARN_ON(mnt_get_writers(mnt));
fsnotify_vfsmount_delete(m);
dput(m->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
}
| 0
|
CVE-2013-1957
|
CWE-264
|
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct sock *nsk;
struct iucv_sock *iucv, *niucv;
struct af_iucv_trans_hdr *trans_hdr;
int err;
iucv = iucv_sk(sk);
trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
if (!iucv) {
/* no sock - connection refused */
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
err = dev_queue_xmit(skb);
goto out;
}
nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
bh_lock_sock(sk);
if ((sk->sk_state != IUCV_LISTEN) ||
sk_acceptq_is_full(sk) ||
!nsk) {
/* error on server socket - connection refused */
if (nsk)
sk_free(nsk);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
err = dev_queue_xmit(skb);
bh_unlock_sock(sk);
goto out;
}
niucv = iucv_sk(nsk);
iucv_sock_init(nsk, sk);
niucv->transport = AF_IUCV_TRANS_HIPER;
niucv->msglimit = iucv->msglimit;
if (!trans_hdr->window)
niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
else
niucv->msglimit_peer = trans_hdr->window;
memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
memcpy(niucv->src_name, iucv->src_name, 8);
memcpy(niucv->src_user_id, iucv->src_user_id, 8);
nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
niucv->hs_dev = iucv->hs_dev;
dev_hold(niucv->hs_dev);
afiucv_swap_src_dest(skb);
trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
trans_hdr->window = niucv->msglimit;
/* if receiver acks the xmit connection is established */
err = dev_queue_xmit(skb);
if (!err) {
iucv_accept_enqueue(sk, nsk);
nsk->sk_state = IUCV_CONNECTED;
sk->sk_data_ready(sk, 1);
} else
iucv_sock_kill(nsk);
bh_unlock_sock(sk);
out:
return NET_RX_SUCCESS;
}
| 0
|
CVE-2013-3229
|
CWE-200
|
aura::Window* CreateWindowInWatchedContainer(const InitParams& params) {
aura::test::TestWindowDelegate* delegate = NULL;
if (!params.can_maximize) {
delegate = aura::test::TestWindowDelegate::CreateSelfDestroyingDelegate();
delegate->set_window_component(HTCAPTION);
if (!params.max_size.IsEmpty())
delegate->set_maximum_size(params.max_size);
}
aura::Window* window = aura::test::CreateTestWindowWithDelegateAndType(
delegate, params.type, 0, params.bounds, NULL, params.show_on_creation);
int32_t behavior = aura::client::kResizeBehaviorNone;
behavior |= params.can_resize ? aura::client::kResizeBehaviorCanResize : 0;
behavior |=
params.can_maximize ? aura::client::kResizeBehaviorCanMaximize : 0;
window->SetProperty(aura::client::kResizeBehaviorKey, behavior);
aura::Window* container =
wm::GetSwitchableContainersForRoot(Shell::GetPrimaryRootWindow(),
/*active_desk_only=*/true)[0];
container->AddChild(window);
return window;
}
| 0
|
CVE-2017-5068
|
CWE-362
|