Merge branch 'signalwire:master' into bugfix/fix-ivr-zombie
This commit is contained in: commit 27d6758777
@@ -255,6 +255,10 @@ SWITCH_BEGIN_EXTERN_C
 typedef uint8_t switch_byte_t;
 
+typedef struct {
+    unsigned int value : 31;
+} switch_uint31_t;
+
 typedef enum {
     SWITCH_PVT_PRIMARY = 0,
     SWITCH_PVT_SECONDARY
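For context: arithmetic on the new 31-bit unsigned bitfield wraps modulo 2^31 rather than 2^32. A standalone sketch of that behavior (demo type name; the real definition is the hunk above), mirroring the unit test added at the bottom of this commit:

```c
#include <stdio.h>

/* Demo type mirroring switch_uint31_t: an unsigned bitfield of 31 bits,
 * so all arithmetic on .value wraps modulo 2^31 instead of 2^32. */
typedef struct {
    unsigned int value : 31;
} uint31_demo_t;

int main(void)
{
    uint31_demo_t x = { .value = 0x7fffffff }; /* largest 31-bit value */

    x.value++;                                 /* wraps to 0, not 0x80000000 */
    printf("%u\n", (unsigned)x.value);         /* prints 0 */

    x.value -= 1;                              /* wraps back to the top */
    printf("0x%x\n", (unsigned)x.value);       /* prints 0x7fffffff */

    return 0;
}
```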
@@ -1581,7 +1581,6 @@ GCC_DIAG_ON(deprecated-declarations)
 } else if (ret == AVERROR(EAGAIN)) {
     /* we fully drain all the output in each encode call, so this should not ever happen */
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG9, "Error sending frame to encoder - BUG, should never happen\n");
     ret = AVERROR_BUG;
     goto error;
 } else if (ret < 0) {
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Error sending frame to encoder\n");
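Both mod_av hunks touch the same send/drain pattern. As background, a sketch of the contract they rely on (an assumed wrapper, not the module's literal code): with FFmpeg's send/receive API, avcodec_send_frame() can only return AVERROR(EAGAIN) if output from an earlier send was left undrained, which is why the log message calls it a bug.

```c
#include <libavcodec/avcodec.h>

/* Illustrative helper: if every send is followed by receiving packets
 * until AVERROR(EAGAIN), the next avcodec_send_frame() cannot itself
 * fail with EAGAIN - hence the "should never happen" log above. */
static int encode_and_drain(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, frame); /* frame == NULL starts a flush */

    if (ret == AVERROR(EAGAIN)) {
        return AVERROR_BUG; /* only possible if output was left undrained */
    } else if (ret < 0) {
        return ret;         /* genuine encoder error */
    }

    /* fully drain all pending output before the next send */
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... write pkt somewhere ... */
        av_packet_unref(pkt);
    }

    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
```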
@@ -1170,7 +1170,6 @@ GCC_DIAG_ON(deprecated-declarations)
 } else if (ret == AVERROR(EAGAIN)) {
     /* we fully drain all the output in each encode call, so this should not ever happen */
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG9, "Error sending frame to encoder on draining AVERROR_BUG - should never happen\n");
     ret = AVERROR_BUG;
     goto do_break;
 } else if (ret < 0) {
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG9, "Error sending frame to encoder on draining\n");
@@ -1426,7 +1425,7 @@ static switch_status_t open_input_file(av_file_context_t *context, switch_file_h
 
     av_dump_format(context->fc, 0, filename, 0);
 
-    for (i = 0; i< context->fc->nb_streams; i++) {
+    for (i = 0; i < context->fc->nb_streams; i++) {
         enum AVMediaType codec_type = av_get_codec_type(context->fc->streams[i]);
 
         if (codec_type == AVMEDIA_TYPE_AUDIO && context->has_audio < 2 && idx < 2) {
@@ -1554,7 +1553,9 @@ static switch_status_t open_input_file(av_file_context_t *context, switch_file_h
     if (context->has_audio) {
         AVCodecContext *c[2] = { NULL };
 
-        c[0] = av_get_codec_context(&context->audio_st[0]);
+        if (!(c[0] = av_get_codec_context(&context->audio_st[0]))) {
+            switch_goto_status(SWITCH_STATUS_FALSE, err);
+        }
 
         if ((cc = av_get_codec_context(&context->audio_st[1]))) {
             c[1] = cc;
@@ -1568,9 +1569,7 @@ static switch_status_t open_input_file(av_file_context_t *context, switch_file_h
     if (c[1]) {
         context->audio_st[1].frame = av_frame_alloc();
         switch_assert(context->audio_st[1].frame);
     }
 
     if (c[0] && c[1]) {
         context->audio_st[0].channels = 1;
         context->audio_st[1].channels = 1;
     } else {
@@ -2016,7 +2015,6 @@ GCC_DIAG_ON(deprecated-declarations)
 } else if (dret == AVERROR(EAGAIN)) {
     /* we fully drain all the output in each decode call, so this should not ever happen */
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG9, "Error sending audio packet to decoder - BUG, should never happen\n");
     dret = AVERROR_BUG;
     goto do_continue;
 } else if (dret < 0) {
     switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG9, "Error sending audio packet to decoder\n");
@@ -2130,9 +2130,7 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
     sma_buffer_t *sqa_amp_b = &buffer->sqa_amp_b;
 
     if (sample_to_skip_n > 0) {
         sample_to_skip_n--;
         valid_amplitude = 0;
         valid_omega = 0;
 
         return AVMD_DETECT_NONE;
     }
@@ -2145,14 +2143,14 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
             RESET_SMA_BUFFER(sma_amp_b);
             RESET_SMA_BUFFER(sqa_amp_b);
             buffer->samples_streak_amp = s->settings.sample_n_continuous_streak_amp;
             sample_to_skip_n = s->settings.sample_n_to_skip;
         }
     } else {
         if (ISINF(amplitude)) {
             amplitude = buffer->amplitude_max;
         }
 
         if (valid_amplitude == 1) {
-            APPEND_SMA_VAL(sma_amp_b, amplitude); /* append amplitude */
+            APPEND_SMA_VAL(sma_amp_b, amplitude); /* append amplitude */
             APPEND_SMA_VAL(sqa_amp_b, amplitude * amplitude);
             if (s->settings.require_continuous_streak_amp == 1) {
                 if (buffer->samples_streak_amp > 0) {
@@ -2161,6 +2159,7 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
             }
         }
     }
 
     if (sma_amp_b->sma > buffer->amplitude_max) {
         buffer->amplitude_max = sma_amp_b->sma;
     }
@@ -2176,9 +2175,7 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
             RESET_SMA_BUFFER(sma_b_fir);
             RESET_SMA_BUFFER(sqa_b_fir);
             buffer->samples_streak = s->settings.sample_n_continuous_streak;
+            sample_to_skip_n = s->settings.sample_n_to_skip;
         }
-        sample_to_skip_n = s->settings.sample_n_to_skip;
     } else if (omega < -0.99999 || omega > 0.99999) {
         valid_omega = 0;
         if (s->settings.require_continuous_streak == 1) {
@@ -2187,7 +2184,6 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
             RESET_SMA_BUFFER(sma_b_fir);
             RESET_SMA_BUFFER(sqa_b_fir);
             buffer->samples_streak = s->settings.sample_n_continuous_streak;
             sample_to_skip_n = s->settings.sample_n_to_skip;
         }
     } else {
         if (valid_omega) {
@@ -2216,20 +2212,26 @@ static enum avmd_detection_mode avmd_process_sample(avmd_session_t *s, circ_buff
     if (((mode == AVMD_DETECT_AMP) || (mode == AVMD_DETECT_BOTH)) && (valid_amplitude == 1)) {
         v_amp = sqa_amp_b->sma - (sma_amp_b->sma * sma_amp_b->sma); /* calculate variance of amplitude (biased estimator) */
         if ((mode == AVMD_DETECT_AMP) && (avmd_decision_amplitude(s, buffer, v_amp, AVMD_AMPLITUDE_RSD_THRESHOLD) == 1)) {
             return AVMD_DETECT_AMP;
         }
     }
 
     if (((mode == AVMD_DETECT_FREQ) || (mode == AVMD_DETECT_BOTH)) && (valid_omega == 1)) {
         v_fir = sqa_b_fir->sma - (sma_b_fir->sma * sma_b_fir->sma); /* calculate variance of filtered samples */
         if ((mode == AVMD_DETECT_FREQ) && (avmd_decision_freq(s, buffer, v_fir, AVMD_VARIANCE_RSD_THRESHOLD) == 1)) {
             return AVMD_DETECT_FREQ;
         }
 
         if (mode == AVMD_DETECT_BOTH) {
-            if ((avmd_decision_amplitude(s, buffer, v_amp, AVMD_AMPLITUDE_RSD_THRESHOLD) == 1) && (avmd_decision_freq(s, buffer, v_fir, AVMD_VARIANCE_RSD_THRESHOLD) == 1)) {
+            if ((avmd_decision_amplitude(s, buffer, v_amp, AVMD_AMPLITUDE_RSD_THRESHOLD) == 1) && (avmd_decision_freq(s, buffer, v_fir, AVMD_VARIANCE_RSD_THRESHOLD) == 1)) {
                 return AVMD_DETECT_BOTH;
             }
         }
     }
 
     return AVMD_DETECT_NONE;
 }
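The decision logic above rests on the one-pass variance identity Var(x) = E[x^2] - (E[x])^2: `sma` holds the running mean of the samples and `sqa` the running mean of their squares, and detection compares the relative standard deviation against a threshold. A minimal sketch of the resulting test (function and parameter names are assumptions, not mod_avmd's API):

```c
#include <math.h>

/* Illustrative only: a steady beep has nearly constant amplitude and
 * frequency, so the relative standard deviation (RSD) of the windowed
 * samples stays small while speech does not. */
static int detect_constant_tone(double sma, double sqa, double rsd_threshold)
{
    double variance = sqa - (sma * sma); /* E[x^2] - E[x]^2, biased estimator */
    double rsd;

    if (variance < 0.0) variance = 0.0;  /* guard against rounding noise */
    if (sma == 0.0) return 0;

    rsd = sqrt(variance) / fabs(sma);    /* relative standard deviation */

    return rsd < rsd_threshold;          /* small RSD over the window => tone */
}
```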
@@ -95,23 +95,23 @@ extern switch_bool_t switch_amrwb_pack_be(unsigned char *shift_buf, int n)
 
 extern switch_bool_t switch_amrwb_unpack_be(unsigned char *encoded_buf, uint8_t *tmp, int encoded_len)
 {
-    int framesz, index, ft;
+    int framesz, index;
     uint8_t shift_tocs[2] = {0x00, 0x00};
     uint8_t *shift_buf;
 
     memcpy(shift_tocs, encoded_buf, 2);
     /* shift for BE */
     switch_amr_array_lshift(4, shift_tocs, 2);
-    ft = shift_tocs[0] >> 3;
-    ft &= ~(1 << 5); /* Frame Type*/
     shift_buf = encoded_buf + 1; /* skip CMR */
     /* shift for BE */
     switch_amr_array_lshift(2, shift_buf, encoded_len - 1);
     /* get frame size */
     index = ((shift_tocs[0] >> 3) & 0x0f);
+    if (index > 10 && index != 0xe && index != 0xf) {
+        return SWITCH_FALSE;
+    }
 
     framesz = switch_amrwb_frame_sizes[index];
     tmp[0] = shift_tocs[0]; /* save TOC */
     memcpy(&tmp[1], shift_buf, framesz);
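Why the new guard matters: the frame-type index is a 4-bit field (0..15) taken straight from the wire, while the size table only covers the defined speech modes plus SID, so an unchecked lookup can read past the end of the array. A standalone sketch with an illustrative table (the values are assumptions, not the real switch_amrwb_frame_sizes contents):

```c
#include <stdint.h>

/* Illustrative AMR-WB frame sizes indexed by frame type 0..10;
 * not the module's actual table. */
static const int frame_sizes_demo[11] = { 17, 23, 32, 36, 40, 46, 50, 58, 60, 5, 0 };

static int safe_frame_size(uint8_t toc)
{
    int index = (toc >> 3) & 0x0f;  /* 4-bit FT field from the TOC byte */

    if (index > 10 && index != 0xe && index != 0xf) {
        return -1;                  /* reserved/invalid frame type: reject */
    }
    if (index == 0xe || index == 0xf) {
        return 0;                   /* SPEECH_LOST / NO_DATA carry no payload */
    }
    return frame_sizes_demo[index]; /* in-bounds by construction now */
}
```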
@@ -191,6 +191,7 @@ static switch_status_t switch_amrwb_init(switch_codec_t *codec, switch_codec_fla
     if (codec->fmtp_in) {
         codec->fmtp_out = switch_core_strdup(codec->memory_pool, codec->fmtp_in);
     }
 
     return SWITCH_STATUS_SUCCESS;
 #else
     struct amrwb_context *context = NULL;
@@ -204,6 +205,7 @@ static switch_status_t switch_amrwb_init(switch_codec_t *codec, switch_codec_fla
     decoding = (flags & SWITCH_CODEC_FLAG_DECODE);
 
     if (!(encoding || decoding) || (!(context = switch_core_alloc(codec->memory_pool, sizeof(struct amrwb_context))))) {
         return SWITCH_STATUS_FALSE;
     } else {
@@ -296,6 +298,7 @@ static switch_status_t switch_amrwb_init(switch_codec_t *codec, switch_codec_fla
 
     /* re-create mode-set */
     fmtptmp_pos = switch_snprintf(fmtptmp, sizeof(fmtptmp), "mode-set=");
 
     for (i = 0; SWITCH_AMRWB_MODES-1 > i; ++i) {
         if (context->enc_modes & (1 << i)) {
             fmtptmp_pos += switch_snprintf(fmtptmp + fmtptmp_pos, sizeof(fmtptmp) - fmtptmp_pos, fmtptmp_pos > strlen("mode-set=") ? ",%d" : "%d", i);
@@ -312,12 +315,13 @@ static switch_status_t switch_amrwb_init(switch_codec_t *codec, switch_codec_fla
     }
 
     if (!globals.volte) {
-        fmtptmp_pos += switch_snprintf(fmtptmp + fmtptmp_pos, sizeof(fmtptmp) - fmtptmp_pos, ";octet-align=%d",
+        switch_snprintf(fmtptmp + fmtptmp_pos, sizeof(fmtptmp) - fmtptmp_pos, ";octet-align=%d",
                         switch_test_flag(context, AMRWB_OPT_OCTET_ALIGN) ? 1 : 0);
     } else {
-        fmtptmp_pos += switch_snprintf(fmtptmp + fmtptmp_pos, sizeof(fmtptmp) - fmtptmp_pos, ";octet-align=%d;max-red=0;mode-change-capability=2",
+        switch_snprintf(fmtptmp + fmtptmp_pos, sizeof(fmtptmp) - fmtptmp_pos, ";octet-align=%d;max-red=0;mode-change-capability=2",
                         switch_test_flag(context, AMRWB_OPT_OCTET_ALIGN) ? 1 : 0);
     }
 
     codec->fmtp_out = switch_core_strdup(codec->memory_pool, fmtptmp);
 
     context->encoder_state = NULL;
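This hunk appears to drop the final `fmtptmp_pos +=` because the updated offset is never read again (a dead store). For reference, a small illustrative sketch of the offset-accumulating snprintf pattern used throughout this function (demo values, not the module's code):

```c
#include <stdio.h>

int main(void)
{
    char fmtp[256];
    size_t pos = 0;

    /* each append advances the write offset by snprintf's return value
     * so the next append continues where the last one stopped */
    pos += snprintf(fmtp + pos, sizeof(fmtp) - pos, "mode-set=%d", 2);
    pos += snprintf(fmtp + pos, sizeof(fmtp) - pos, ",%d", 7);

    /* last append: the updated offset would never be read, so the
     * "pos +=" can be dropped without changing the resulting string */
    snprintf(fmtp + pos, sizeof(fmtp) - pos, ";octet-align=%d", 1);

    puts(fmtp); /* mode-set=2,7;octet-align=1 */
    return 0;
}
```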
@@ -776,7 +776,7 @@ struct sofia_profile {
     int watchdog_enabled;
     switch_mutex_t *gw_mutex;
     uint32_t queued_events;
-    uint32_t last_cseq;
+    switch_uint31_t last_cseq;
     int tls_only;
     int tls_verify_date;
     enum tport_tls_verify_policy tls_verify_policy;
@@ -2112,12 +2112,12 @@ static int sofia_dialog_probe_callback(void *pArg, int argc, char **argv, char *
 
 #define SOFIA_PRESENCE_COLLISION_DELTA 50
 #define SOFIA_PRESENCE_ROLLOVER_YEAR (86400 * 365 * SOFIA_PRESENCE_COLLISION_DELTA)
-static uint32_t check_presence_epoch(void)
+static switch_uint31_t check_presence_epoch(void)
 {
     time_t now = switch_epoch_time_now(NULL);
-    uint32_t callsequence = (uint32_t)((now - mod_sofia_globals.presence_epoch) * SOFIA_PRESENCE_COLLISION_DELTA);
+    switch_uint31_t callsequence = { .value = (uint32_t)((now - mod_sofia_globals.presence_epoch) * SOFIA_PRESENCE_COLLISION_DELTA) };
 
-    if (!mod_sofia_globals.presence_year || callsequence >= SOFIA_PRESENCE_ROLLOVER_YEAR) {
+    if (!mod_sofia_globals.presence_year || callsequence.value >= SOFIA_PRESENCE_ROLLOVER_YEAR) {
         struct tm tm;
         switch_mutex_lock(mod_sofia_globals.mutex);
         tm = *(localtime(&now));
@@ -2125,7 +2125,7 @@ static uint32_t check_presence_epoch(void)
         if (tm.tm_year != mod_sofia_globals.presence_year) {
             mod_sofia_globals.presence_epoch = (uint32_t)now - (tm.tm_yday * 86400) - (tm.tm_hour * 60 * 60) - (tm.tm_min * 60) - tm.tm_sec;
             mod_sofia_globals.presence_year = tm.tm_year;
-            callsequence = (uint32_t)(((uint32_t)now - mod_sofia_globals.presence_epoch) * SOFIA_PRESENCE_COLLISION_DELTA);
+            callsequence.value = (uint32_t)(((uint32_t)now - mod_sofia_globals.presence_epoch) * SOFIA_PRESENCE_COLLISION_DELTA);
         }
 
         switch_mutex_unlock(mod_sofia_globals.mutex);
@@ -2136,17 +2136,17 @@ static uint32_t check_presence_epoch(void)
 
 uint32_t sofia_presence_get_cseq(sofia_profile_t *profile)
 {
-    uint32_t callsequence;
+    switch_uint31_t callsequence;
     int diff = 0;
 
     switch_mutex_lock(profile->ireg_mutex);
 
     callsequence = check_presence_epoch();
 
-    if (profile->last_cseq) {
-        diff = callsequence - profile->last_cseq;
+    if (profile->last_cseq.value) {
+        diff = (int)callsequence.value - (int)profile->last_cseq.value;
         if (diff <= 0 && diff > -100000) {
-            callsequence = ++profile->last_cseq;
+            callsequence.value = ++profile->last_cseq.value;
         }
     }
 
@@ -2154,8 +2154,7 @@ uint32_t sofia_presence_get_cseq(sofia_profile_t *profile)
 
     switch_mutex_unlock(profile->ireg_mutex);
 
-    return callsequence;
-
+    return (uint32_t)callsequence.value;
 }
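Background on the 31-bit choice: RFC 3261 requires a CSeq sequence number to be less than 2^31, but deriving it from epoch seconds times SOFIA_PRESENCE_COLLISION_DELTA in a plain uint32_t can overflow into the invalid upper range. With the bitfield, any overflow wraps at 2^31 and the value stays legal by construction. A standalone sketch (demo names, not mod_sofia code):

```c
#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Demo type: the 31-bit field keeps the CSeq < 2^31 no matter how the
 * epoch arithmetic behaves. */
typedef struct {
    unsigned int value : 31;
} uint31_demo_t;

#define COLLISION_DELTA 50 /* mirrors SOFIA_PRESENCE_COLLISION_DELTA */

int main(void)
{
    time_t now = time(NULL);
    time_t epoch = now - 86400; /* pretend the yearly epoch was a day ago */
    uint31_demo_t cseq = { .value = (uint32_t)((now - epoch) * COLLISION_DELTA) };

    printf("cseq=%u (always < 2^31)\n", (unsigned)cseq.value);
    return 0;
}
```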
@@ -1096,7 +1096,8 @@ static switch_status_t vlc_file_read(switch_file_handle_t *handle, void *data, s
         switch_thread_cond_wait(context->cond, context->cond_mutex);
         status = libvlc_media_get_state(context->m);
     }
+    switch_mutex_lock(context->cond_mutex);
 
     switch_mutex_unlock(context->cond_mutex);
 
     if (context->err == 1) {
         switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "VLC error\n");
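For reference, the underlying rule: a condition variable must be waited on with its mutex held, and a bare lock/unlock pair acts as a barrier that waits for the signaling thread to leave its critical section before proceeding. An analogous sketch in plain pthreads (mod_vlc itself uses the switch_* wrappers; names here are illustrative):

```c
#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int ready;
} waiter_t;

static void wait_until_ready(waiter_t *w)
{
    pthread_mutex_lock(&w->mutex);
    while (!w->ready) {
        /* atomically releases the mutex and sleeps; re-acquires on wake */
        pthread_cond_wait(&w->cond, &w->mutex);
    }
    pthread_mutex_unlock(&w->mutex);
}

static void signal_ready(waiter_t *w)
{
    pthread_mutex_lock(&w->mutex);
    w->ready = 1;
    pthread_cond_signal(&w->cond);
    pthread_mutex_unlock(&w->mutex);
}

static void wait_for_signaler_to_finish(waiter_t *w)
{
    /* the lock/unlock "barrier" seen in the hunk above: the lock cannot
     * succeed until the signaling thread has released the mutex */
    pthread_mutex_lock(&w->mutex);
    pthread_mutex_unlock(&w->mutex);
}
```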
@@ -0,0 +1,76 @@
`gdb` scripts
-------------

Originally from: https://github.com/facebook/folly/tree/593b6e76881042031b7f21d898c8e0874ea79fe0/folly/experimental/gdb

This directory contains a collection of `gdb` scripts that we have found helpful.
These scripts use the [gdb extension Python API](https://sourceware.org/gdb/current/onlinedocs/gdb/Python.html#Python).

### How to run the scripts

To run the scripts, fire up `gdb` and load a script with `source -v`. Example:

```lang=bash
$ gdb -p 123456
(gdb) source -v ./folly/experimental/gdb/deadlock.py
Type "deadlock" to detect deadlocks.
# At this point, any new commands defined in `deadlock.py` are available.
(gdb) deadlock
Found deadlock!
...
```

### What does each script do?

#### `deadlock.py` - Detect deadlocks

Consider the following program that always deadlocks:

```lang=cpp
void deadlock3() {
  std::mutex m1, m2, m3;
  folly::Baton<> b1, b2, b3;

  auto t1 = std::thread([&m1, &m2, &b1, &b2] {
    std::lock_guard<std::mutex> g1(m1);
    b1.post();
    b2.wait();
    std::lock_guard<std::mutex> g2(m2);
  });

  auto t2 = std::thread([&m3, &m2, &b3, &b2] {
    std::lock_guard<std::mutex> g2(m2);
    b2.post();
    b3.wait();
    std::lock_guard<std::mutex> g3(m3);
  });

  auto t3 = std::thread([&m3, &m1, &b3, &b1] {
    std::lock_guard<std::mutex> g3(m3);
    b3.post();
    b1.wait();
    std::lock_guard<std::mutex> g1(m1);
  });

  t1.join();
  t2.join();
  t3.join();
}
```

The `deadlock.py` script introduces a new `deadlock` command that can help
us identify the threads and mutexes involved with the deadlock.

```lang=bash
$ gdb -p 2174496
(gdb) source -v ./folly/experimental/gdb/deadlock.py
Type "deadlock" to detect deadlocks.
(gdb) deadlock
Found deadlock!
Thread 2 (LWP 2174497) is waiting on mutex (0x00007ffcff42a4c0) held by Thread 3 (LWP 2174498)
Thread 3 (LWP 2174498) is waiting on mutex (0x00007ffcff42a4f0) held by Thread 4 (LWP 2174499)
Thread 4 (LWP 2174499) is waiting on mutex (0x00007ffcff42a490) held by Thread 2 (LWP 2174497)
```

NOTE: This script only works on Linux and requires debug symbols to be installed
for the `pthread` library.
@@ -0,0 +1,474 @@
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
from collections import defaultdict
from enum import Enum

import gdb


class DiGraph:
    """
    Adapted from networkx: http://networkx.github.io/
    Represents a directed graph. Edges can store (key, value) attributes.
    """

    def __init__(self):
        # Map of node -> set of nodes
        self.adjacency_map = {}
        # Map of (node1, node2) -> map string -> arbitrary attribute
        # This will not be copied in subgraph()
        self.attributes_map = {}

    def neighbors(self, node):
        return self.adjacency_map.get(node, set())

    def edges(self):
        edges = []
        for node, neighbors in self.adjacency_map.items():
            for neighbor in neighbors:
                edges.append((node, neighbor))
        return edges

    def nodes(self):
        return self.adjacency_map.keys()

    def attributes(self, node1, node2):
        return self.attributes_map[(node1, node2)]

    def add_edge(self, node1, node2, **kwargs):
        if node1 not in self.adjacency_map:
            self.adjacency_map[node1] = set()
        if node2 not in self.adjacency_map:
            self.adjacency_map[node2] = set()
        self.adjacency_map[node1].add(node2)
        self.attributes_map[(node1, node2)] = kwargs

    def remove_node(self, node):
        self.adjacency_map.pop(node, None)
        for _, neighbors in self.adjacency_map.items():
            neighbors.discard(node)

    def subgraph(self, nodes):
        graph = DiGraph()
        for node in nodes:
            for neighbor in self.neighbors(node):
                if neighbor in nodes:
                    graph.add_edge(node, neighbor)
        return graph

    def node_link_data(self):
        """
        Returns the graph as a dictionary in a format that can be
        serialized.
        """
        data = {
            "directed": True,
            "multigraph": False,
            "graph": {},
            "links": [],
            "nodes": [],
        }

        # Do one pass to build a map of node -> position in nodes
        node_to_number = {}
        for node in self.adjacency_map.keys():
            node_to_number[node] = len(data["nodes"])
            data["nodes"].append({"id": node})

        # Do another pass to build the link information
        for node, neighbors in self.adjacency_map.items():
            for neighbor in neighbors:
                link = self.attributes_map[(node, neighbor)].copy()
                link["source"] = node_to_number[node]
                link["target"] = node_to_number[neighbor]
                data["links"].append(link)
        return data


def strongly_connected_components(G):  # noqa: C901
    """
    Adapted from networkx: http://networkx.github.io/
    Parameters
    ----------
    G : DiGraph
    Returns
    -------
    comp : generator of sets
        A generator of sets of nodes, one for each strongly connected
        component of G.
    """
    preorder = {}
    lowlink = {}
    scc_found = {}
    scc_queue = []
    i = 0  # Preorder counter
    for source in G.nodes():
        if source not in scc_found:
            queue = [source]
            while queue:
                v = queue[-1]
                if v not in preorder:
                    i = i + 1
                    preorder[v] = i
                done = 1
                v_nbrs = G.neighbors(v)
                for w in v_nbrs:
                    if w not in preorder:
                        queue.append(w)
                        done = 0
                        break
                if done == 1:
                    lowlink[v] = preorder[v]
                    for w in v_nbrs:
                        if w not in scc_found:
                            if preorder[w] > preorder[v]:
                                lowlink[v] = min([lowlink[v], lowlink[w]])
                            else:
                                lowlink[v] = min([lowlink[v], preorder[w]])
                    queue.pop()
                    if lowlink[v] == preorder[v]:
                        scc_found[v] = True
                        scc = {v}
                        while scc_queue and preorder[scc_queue[-1]] > preorder[v]:
                            k = scc_queue.pop()
                            scc_found[k] = True
                            scc.add(k)
                        yield scc
                    else:
                        scc_queue.append(v)


def simple_cycles(G):  # noqa: C901
    """
    Adapted from networkx: http://networkx.github.io/
    Parameters
    ----------
    G : DiGraph
    Returns
    -------
    cycle_generator: generator
        A generator that produces elementary cycles of the graph.
        Each cycle is represented by a list of nodes along the cycle.
    """

    def _unblock(thisnode, blocked, B):
        stack = {thisnode}
        while stack:
            node = stack.pop()
            if node in blocked:
                blocked.remove(node)
                stack.update(B[node])
                B[node].clear()

    # Johnson's algorithm requires some ordering of the nodes.
    # We assign the arbitrary ordering given by the strongly connected comps
    # There is no need to track the ordering as each node removed as processed.
    # save the actual graph so we can mutate it here
    # We only take the edges because we do not want to
    # copy edge and node attributes here.
    subG = G.subgraph(G.nodes())
    sccs = list(strongly_connected_components(subG))
    while sccs:
        scc = sccs.pop()
        # order of scc determines ordering of nodes
        startnode = scc.pop()
        # Processing node runs 'circuit' routine from recursive version
        path = [startnode]
        blocked = set()  # vertex: blocked from search?
        closed = set()  # nodes involved in a cycle
        blocked.add(startnode)
        B = defaultdict(set)  # graph portions that yield no elementary circuit
        stack = [(startnode, list(subG.neighbors(startnode)))]
        while stack:
            thisnode, nbrs = stack[-1]
            if nbrs:
                nextnode = nbrs.pop()
                if nextnode == startnode:
                    yield path[:]
                    closed.update(path)
                elif nextnode not in blocked:
                    path.append(nextnode)
                    stack.append((nextnode, list(subG.neighbors(nextnode))))
                    closed.discard(nextnode)
                    blocked.add(nextnode)
                    continue
            # done with nextnode... look for more neighbors
            if not nbrs:  # no more nbrs
                if thisnode in closed:
                    _unblock(thisnode, blocked, B)
                else:
                    for nbr in subG.neighbors(thisnode):
                        if thisnode not in B[nbr]:
                            B[nbr].add(thisnode)
                stack.pop()
                path.pop()
        # done processing this node
        subG.remove_node(startnode)
        H = subG.subgraph(scc)  # make smaller to avoid work in SCC routine
        sccs.extend(list(strongly_connected_components(H)))


def find_cycle(graph):
    """
    Looks for a cycle in the graph. If found, returns the first cycle.
    If nodes a1, a2, ..., an are in a cycle, then this returns:
        [(a1,a2), (a2,a3), ... (an-1,an), (an, a1)]
    Otherwise returns an empty list.
    """
    cycles = list(simple_cycles(graph))
    if cycles:
        nodes = cycles[0]
        nodes.append(nodes[0])
        edges = []
        prev = nodes[0]
        for node in nodes[1:]:
            edges.append((prev, node))
            prev = node
        return edges
    else:
        return []


def get_stacktrace(thread_id):
    """
    Returns the stack trace for the thread id as a list of strings.
    """
    gdb.execute("thread %d" % thread_id, from_tty=False, to_string=True)
    output = gdb.execute("bt", from_tty=False, to_string=True)
    stacktrace_lines = output.strip().split("\n")
    return stacktrace_lines


def is_thread_blocked_with_frame(
    thread_id, top_line, expected_top_lines, expected_frame
):
    """
    Returns True if we found expected_top_line in top_line, and
    we found the expected_frame in the thread's stack trace.
    """
    if all(expected not in top_line for expected in expected_top_lines):
        return False
    stacktrace_lines = get_stacktrace(thread_id)
    return any(expected_frame in line for line in stacktrace_lines)


class MutexType(Enum):
    """Types of mutexes that we can detect deadlocks."""

    PTHREAD_MUTEX_T = "pthread_mutex_t"
    PTHREAD_RWLOCK_T = "pthread_rwlock_t"

    @staticmethod
    def get_mutex_type(thread_id, top_line):
        """
        Returns the probable mutex type, based on the first line
        of the thread's stack. Returns None if not found.
        """

        WAITLIST = [
            "__lll_lock_wait",
            "futex_abstimed_wait",
            "futex_abstimed_wait_cancelable",
            "futex_reltimed_wait",
            "futex_reltimed_wait_cancelable",
            "futex_wait",
            "futex_wait_cancelable",
        ]

        if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, "pthread_mutex"):
            return MutexType.PTHREAD_MUTEX_T
        if is_thread_blocked_with_frame(
            thread_id, top_line, WAITLIST, "pthread_rwlock"
        ):
            return MutexType.PTHREAD_RWLOCK_T
        return None

    @staticmethod
    def get_mutex_owner_and_address_func_for_type(mutex_type):
        """
        Returns a function to resolve the mutex owner and address for
        the given type. The returned function f has the following
        signature:

            f: args: (map of thread lwp -> thread id), blocked thread lwp
               returns: (lwp of thread owning mutex, mutex address)
                        or (None, None) if not found.

        Returns None if there is no function for this mutex_type.
        """
        if mutex_type == MutexType.PTHREAD_MUTEX_T:
            return get_pthread_mutex_t_owner_and_address
        if mutex_type == MutexType.PTHREAD_RWLOCK_T:
            return get_pthread_rwlock_t_owner_and_address
        return None


def print_cycle(graph, lwp_to_thread_id, cycle):
    """Prints the threads and mutexes involved in the deadlock."""
    for m, n in cycle:
        print(
            "Thread %d (LWP %d) is waiting on %s (0x%016x) held by "
            "Thread %d (LWP %d)"
            % (
                lwp_to_thread_id[m],
                m,
                graph.attributes(m, n)["mutex_type"].value,
                graph.attributes(m, n)["mutex"],
                lwp_to_thread_id[n],
                n,
            )
        )


def get_thread_info():
    """
    Returns a pair of:
    - map of LWP -> thread ID
    - map of blocked threads LWP -> potential mutex type
    """
    # LWP -> thread ID
    lwp_to_thread_id = {}

    # LWP -> potential mutex type it is blocked on
    blocked_threads = {}

    output = gdb.execute("info threads", from_tty=False, to_string=True)
    lines = output.strip().split("\n")[1:]
    regex = re.compile(r"[\s\*]*(\d+).*Thread.*\(LWP (\d+)\).*")
    for line in lines:
        try:
            thread_id = int(regex.match(line).group(1))
            thread_lwp = int(regex.match(line).group(2))
            lwp_to_thread_id[thread_lwp] = thread_id
            mutex_type = MutexType.get_mutex_type(thread_id, line)
            if mutex_type:
                blocked_threads[thread_lwp] = mutex_type
        except Exception:
            continue

    return (lwp_to_thread_id, blocked_threads)


def get_pthread_mutex_t_owner_and_address(lwp_to_thread_id, thread_lwp):
    """
    Finds the thread holding the mutex that this thread is blocked on.
    Returns a pair of (lwp of thread owning mutex, mutex address),
    or (None, None) if not found.
    """
    # Go up the stack to the pthread_mutex_lock frame
    gdb.execute(
        "thread %d" % lwp_to_thread_id[thread_lwp], from_tty=False, to_string=True
    )
    gdb.execute("frame 1", from_tty=False, to_string=True)

    # Get the owner of the mutex by inspecting the internal
    # fields of the mutex.
    try:
        mutex_info = gdb.parse_and_eval("mutex").dereference()
        mutex_owner_lwp = int(mutex_info["__data"]["__owner"])
        return (mutex_owner_lwp, int(mutex_info.address))
    except gdb.error:
        return (None, None)


def get_pthread_rwlock_t_owner_and_address(lwp_to_thread_id, thread_lwp):
    """
    If the thread is waiting on a write-locked pthread_rwlock_t, this will
    return the pair of:
    (lwp of thread that is write-owning the mutex, mutex address)
    or (None, None) if not found, or if the mutex is read-locked.
    """
    # Go up the stack to the pthread_rwlock_{rd|wr}lock frame
    gdb.execute(
        "thread %d" % lwp_to_thread_id[thread_lwp], from_tty=False, to_string=True
    )
    gdb.execute("frame 2", from_tty=False, to_string=True)

    # Get the owner of the mutex by inspecting the internal
    # fields of the mutex.
    try:
        rwlock_info = gdb.parse_and_eval("rwlock").dereference()
        rwlock_data = rwlock_info["__data"]
        field_names = ["__cur_writer", "__writer"]
        fields = rwlock_data.type.fields()
        field = [f for f in fields if f.name in field_names][0]
        rwlock_owner_lwp = int(rwlock_data[field])
        # We can only track the owner if it is currently write-locked.
        # If it is not write-locked or if it is currently read-locked,
        # possibly by multiple threads, we cannot find the owner.
        if rwlock_owner_lwp != 0:
            return (rwlock_owner_lwp, int(rwlock_info.address))
        else:
            return (None, None)
    except gdb.error:
        return (None, None)


class Deadlock(gdb.Command):
    """Detects deadlocks"""

    def __init__(self):
        super(Deadlock, self).__init__("deadlock", gdb.COMMAND_NONE)

    def invoke(self, arg, from_tty):
        """Prints the threads and mutexes in a deadlock, if it exists."""
        lwp_to_thread_id, blocked_threads = get_thread_info()

        # Nodes represent threads. Edge (A,B) exists if thread A
        # is waiting on a mutex held by thread B.
        graph = DiGraph()

        # Go through all the blocked threads and see which threads
        # they are blocked on, and build the thread wait graph.
        for thread_lwp, mutex_type in blocked_threads.items():
            get_owner_and_address_func = (
                MutexType.get_mutex_owner_and_address_func_for_type(mutex_type)
            )
            if not get_owner_and_address_func:
                continue
            mutex_owner_lwp, mutex_address = get_owner_and_address_func(
                lwp_to_thread_id, thread_lwp
            )
            if mutex_owner_lwp and mutex_address:
                graph.add_edge(
                    thread_lwp,
                    mutex_owner_lwp,
                    mutex=mutex_address,
                    mutex_type=mutex_type,
                )

        # A deadlock exists if there is a cycle in the graph.
        cycle = find_cycle(graph)
        if cycle:
            print("Found deadlock!")
            print_cycle(graph, lwp_to_thread_id, cycle)
        else:
            print("No deadlock detected. " "Do you have debug symbols installed?")


def load():
    # instantiate the Deadlock command
    Deadlock()
    print('Type "deadlock" to detect deadlocks.')


def info():
    return "Detect deadlocks"


if __name__ == "__main__":
    load()
@@ -53,6 +53,26 @@ FST_CORE_BEGIN("./conf")
     }
     FST_TEARDOWN_END()
 
+    FST_TEST_BEGIN(test_switch_uint31_t_overflow)
+    {
+        switch_uint31_t x;
+        uint32_t overflow;
+
+        x.value = 0x7fffffff;
+        x.value++;
+
+        fst_check_int_equals(x.value, 0);
+        x.value++;
+        fst_check_int_equals(x.value, 1);
+        x.value -= 2;
+        fst_check_int_equals(x.value, 0x7fffffff);
+
+        overflow = (uint32_t)0x7fffffff + 1;
+        x.value = overflow;
+        fst_check_int_equals(x.value, 0);
+    }
+    FST_TEST_END()
+
     FST_TEST_BEGIN(test_switch_parse_cidr_v6)
     {
         ip_t ip, mask;