WebRTC ICE Connection Failing with Valid ICE Servers and Candidates - rust

This is a continuation of my previous question here.
I'm using the webrtc crate through this library to create a WebRTC connection, but the ICE connection keeps failing. I checked the STUN and TURN servers being used with this site, and they all work correctly.
Here is my current code:
async fn new(...) {
    let webrtcredux = Arc::new(AsyncMutex::new(WebRtcRedux::default()));
    webrtcredux.lock().await.set_tokio_runtime(Handle::current());
    let servers = ice.urls.into_iter().map(|url| {
        if url.starts_with("turn") {
            RTCIceServer {
                urls: vec![url],
                username: ice.username.clone(),
                credential: ice.credential.clone(),
                ..RTCIceServer::default()
            }
        } else {
            RTCIceServer {
                urls: vec![url],
                ..RTCIceServer::default()
            }
        }
    }).collect::<Vec<_>>();
    debug!("Using ICE servers: {:#?}", servers);
    webrtcredux.lock().await.add_ice_servers(servers);
    // More gstreamer setup code...
}
async fn start(...) {
self.pipeline.set_state(gst::State::Playing)?;
let encoder = self.encoder_type;
let arc_from_ws = Arc::new(AsyncMutex::new(from_ws_rx));
self.webrtcredux.lock().await.on_peer_connection_state_change(Box::new(|state| {
debug!("[WebRTC] Peer connection state changed to: {}", state);
Box::pin(async {})
})).await.expect("Failed to set on peer connection state change");
self.webrtcredux.lock().await.on_ice_connection_state_change(Box::new(|state| {
debug!("[WebRTC] ICE connection state changed to: {}", state);
Box::pin(async {})
})).await.expect("Failed to set on ice connection state change");
// let redux_arc = self.webrtcredux.clone();
// let candidates = Arc::new(AsyncMutex::new(Vec::new()));
// let candidates_arc = candidates.clone();
self.webrtcredux.lock().await.on_ice_candidate(Box::new(move |candidate| {
// let redux_arc = redux_arc.clone()
// let candidates = candidates_arc.clone();
Box::pin(async move {
if let Some(candidate) = candidate {
debug!("ICE Candidate: {:#?}", candidate.to_json().await.unwrap());
// candidates.lock().await.push(candidate.to_json().await.unwrap());
}
// redux_arc.lock().await.add_ice_candidate(candidate.unwrap().to_json().await.unwrap()).await.unwrap();
})
})).await.expect("Failed ice candidate");
let redux_arc = self.webrtcredux.clone();
self.webrtcredux.lock().await.on_negotiation_needed(Box::new(move || {
let redux_arc = redux_arc.clone();
info!("[WebRTC] Negotiation needed");
Box::pin(async move {
// Waits for all tracks to be added to create full SDP
redux_arc.lock().await.wait_for_all_tracks().await;
let offer = redux_arc.lock().await.create_offer(Some(RTCOfferOptions {
voice_activity_detection: true,
ice_restart: false,
})).await.expect("Failed to create offer");
// offer.props.insert(4, SdpProp::Attribute {
// key: "ice-options".to_string(),
// value: Some("trickle".to_string())
// });
// offer.props.insert(5, SdpProp::Attribute {
// key: "extmap-allow-mixed".to_string(),
// value: None
// });
// offer.props.insert(6, SdpProp::Attribute {
// key: "msid-semantic".to_string(),
// value: Some(" WMS".to_string())
// });
trace!("[WebRTC] Generated local SDP: {}", offer.to_string());
redux_arc.lock().await.set_local_description(&offer, RTCSdpType::Offer).await.expect("Failed to set local description");
info!("[WebRTC] Local description set");
})
})).await.expect("Failed to set on negotiation needed");
let redux_arc = self.webrtcredux.clone();
self.webrtcredux.lock().await.on_ice_gathering_state_change(Box::new(move |state| {
debug!("[WebRTC] ICE gathering state changed to: {}", state);
let redux_arc = redux_arc.clone();
let to_ws_tx = to_ws_tx.clone();
let from_ws_rx = arc_from_ws.clone();
if state != RTCIceGathererState::Complete {
return Box::pin(async {});
}
Box::pin(async move {
let local = redux_arc.lock().await.local_description().await.unwrap().unwrap();
let video_media: &SdpProp = local.props.iter().find(|v| match *v {
SdpProp::Media { r#type, .. } => {
*r#type == MediaType::Video
},
_ => false
}).unwrap();
let (video_ssrc, video_payload_type, rtx_payload_type) = if let SdpProp::Media { props, .. } = video_media {
let mut ssrc = 0u32;
let mut video_payload = 0u8;
let mut rtx_payload = 0u8;
for prop in props {
match prop {
MediaProp::Attribute { key, value } => {
match key {
v if *v == "rtpmap".to_string() => {
match value {
Some(val) => {
let num = val.clone().split(' ').collect::<Vec<_>>()[0].parse::<u8>().unwrap();
if val.ends_with(&format!("{}/90000", encoder.type_string())) && video_payload == 0 {
video_payload = num;
} else if val.ends_with("rtx/90000") && rtx_payload == 0 {
rtx_payload = num;
}
},
None => unreachable!()
}
},
v if *v == "ssrc".to_string() => {
ssrc = match value {
Some(val) => val.clone().split(' ').collect::<Vec<_>>()[0].parse::<u32>().unwrap(),
None => unreachable!(),
};
},
_ => continue
}
},
_ => continue
}
}
(ssrc, video_payload, rtx_payload)
} else { unreachable!() };
let audio_media: &SdpProp = local.props.iter().find(|v| match *v {
SdpProp::Media { r#type, .. } => {
*r#type == MediaType::Audio
},
_ => false
}).unwrap();
let audio_ssrc = if let SdpProp::Media { props, .. } = audio_media {
props.into_iter().find_map(|p| match p {
MediaProp::Attribute {key, value} => {
if key != "ssrc" {
return None;
}
let val = match value {
Some(val) => val.clone(),
None => unreachable!(),
};
Some(val.split(' ').collect::<Vec<_>>()[0].parse::<u32>().unwrap())
},
_ => None
}).unwrap()
} else { unreachable!() };
trace!("[WebRTC] Updated local SDP: {}", local.to_string());
to_ws_tx.send(ToWs {
ssrcs: StreamSSRCs {
audio: audio_ssrc,
video: video_ssrc,
rtx: 0
},
local_sdp: local.to_string(),
video_payload_type,
rtx_payload_type,
}).await.unwrap();
let from_ws = from_ws_rx.lock().await.recv().await.unwrap();
match SDP::from_str(&from_ws.remote_sdp).unwrap().props.pop().unwrap() {
SdpProp::Media { ports, props, .. } => {
let mut main_ip = None;
let mut fingerprint = None;
let mut ufrag = None;
let mut pwd = None;
let mut candidate = None;
for prop in props {
let current = prop.clone();
match prop {
MediaProp::Connection { address, .. } => main_ip = Some(address),
MediaProp::Attribute { key, value: _ } => {
match &key[..] {
"candidate" => candidate = Some(current),
"fingerprint" => fingerprint = Some(current),
"ice-ufrag" => ufrag = Some(current),
"ice-pwd" => pwd = Some(current),
_ => continue
}
}
_ => continue
}
}
let connection = MediaProp::Connection {
net_type: NetworkType::Internet,
address_type: AddressType::IPv4,
address: main_ip.unwrap(),
ttl: Some(127),
num_addresses: Some(1),
suffix: None,
};
let base_media_props = vec![
connection,
// candidate.unwrap(),
fingerprint.unwrap(),
ufrag.unwrap(),
pwd.unwrap(),
MediaProp::Attribute {
key: "rtcp-mux".to_string(),
value: None
},
MediaProp::Attribute {
key: "rtcp".to_string(),
value: Some(ports[0].to_string())
},
MediaProp::Attribute {
key: "setup".to_string(),
value: Some("passive".to_string())
},
MediaProp::Attribute {
key: "inactive".to_string(),
value: None
}
];
let mut video_vec_attrs = ["ccm fir", "nack", "nack pli", "goog-remb", "transport-cc"].into_iter().map(|val| {
MediaProp::Attribute {
key: "rtcp-fb".to_string(),
value: Some(format!("{} {}", video_payload_type, val))
}
}).collect::<Vec<_>>();
video_vec_attrs.append(&mut [
"2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time",
"3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01",
"14 urn:ietf:params:rtp-hdrext:toffset",
"13 urn:3gpp:video-orientation",
"5 http://www.webrtc.org/experiments/rtp-hdrext/playout-delay"
].into_iter().map(|ext| {
MediaProp::Attribute {
key: "extmap".to_string(),
value: Some(ext.to_string())
}
}).collect::<Vec<_>>());
video_vec_attrs.append(&mut vec![
MediaProp::Attribute {
key: "fmtp".to_string(),
value: Some(format!("{} x-google-max-bitrate=2500;level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f", video_payload_type))
},
MediaProp::Attribute {
key: "fmtp".to_string(),
value: Some(format!("{} apt={}", rtx_payload_type, video_payload_type))
},
MediaProp::Attribute {
key: "mid".to_string(),
value: Some(0.to_string())
},
MediaProp::Attribute {
key: "rtpmap".to_string(),
value: Some(format!("{} {}/90000", video_payload_type, encoder.type_string()))
},
MediaProp::Attribute {
key: "rtpmap".to_string(),
value: Some(format!("{} rtx/90000", rtx_payload_type))
},
candidate.unwrap(),
MediaProp::Attribute {
key: "end-of-candidates".to_string(),
value: None
}
]);
let video_media = SdpProp::Media {
r#type: MediaType::Video,
ports: ports.clone(),
protocol: format!("UDP/TLS/RTP/SAVPF {} {}", video_payload_type, rtx_payload_type),
format: "".to_string(),
props: base_media_props.clone().into_iter().chain(video_vec_attrs.into_iter()).collect::<Vec<_>>()
};
let mut audio_vec_attrs = [
"1 urn:ietf:params:rtp-hdrext:ssrc-audio-level",
"3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01"
].into_iter().map(|ext| {
MediaProp::Attribute {
key: "extmap".to_string(),
value: Some(ext.to_string())
}
}).collect::<Vec<_>>();
audio_vec_attrs.append(&mut vec![
MediaProp::Attribute {
key: "fmtp".to_string(),
value: Some("111 minptime=10;useinbandfec=1;usedtx=1".to_string())
},
MediaProp::Attribute {
key: "maxptime".to_string(),
value: Some(60.to_string())
},
MediaProp::Attribute {
key: "rtpmap".to_string(),
value: Some("111 opus/90000".to_string())
},
MediaProp::Attribute {
key: "rtcp-fb".to_string(),
value: Some("111 transport-cc".to_string())
},
MediaProp::Attribute {
key: "mid".to_string(),
value: Some(1.to_string())
}
]);
let audio_media = SdpProp::Media {
r#type: MediaType::Audio,
ports,
protocol: "UDP/TLS/RTP/SAVPF 111".to_string(),
format: "".to_string(),
props: base_media_props.clone().into_iter().chain(audio_vec_attrs.into_iter()).collect::<Vec<_>>()
};
// Generate answer
let answer = SDP { props: vec![
SdpProp::Version(0),
SdpProp::Origin {
username: "-".to_string(),
session_id: "1420070400000".to_string(),
session_version: 0,
net_type: NetworkType::Internet,
address_type: AddressType::IPv4,
address: "127.0.0.1".to_string()
},
SdpProp::SessionName("-".to_string()),
SdpProp::Timing {
start: 0,
stop: 0
},
SdpProp::Attribute {
key: "msid-semantic".to_string(),
value: Some(" WMS *".to_string())
},
SdpProp::Attribute {
key: "group".to_string(),
value: Some("BUNDLE 0 1".to_string())
},
video_media,
audio_media
]};
trace!("[WebRTC] Generated remote SDP: {}", answer.to_string());
redux_arc.lock().await.set_remote_description(&answer, RTCSdpType::Answer).await.expect("Failed to set remote description");
info!("[WebRTC] Remote description set");
}
_ => unreachable!()
}
})
})).await.expect("Failed to set on ice gathering change");
Ok(StateChangeSuccess::Success)
}
Local SDP after ICE gathering completes:
v=0
o=- 3006164469565782471 253007078 IN IP4 0.0.0.0
s=-
t=0 0
a=fingerprint:sha-256 F5:34:75:08:3E:AB:99:1E:5F:79:BF:6D:14:EC:D6:C2:F6:20:74:D6:D3:1D:78:48:58:B6:1E:2B:32:F3:D9:64
a=group:BUNDLE 0 1
m=video 9 UDP/TLS/RTP/SAVPF 96 97 98 99 100 101 102 121 127 120 125 107 108 109 123 118 116
c=IN IP4 0.0.0.0
a=setup:actpass
a=mid:0
a=ice-ufrag:cWRCBPTiuOohkLsf
a=ice-pwd:mHMqXcRexKOkbHKAZlvxjgvLFtdHiZAL
a=rtcp-mux
a=rtcp-rsize
a=rtpmap:96 VP8/90000
a=rtcp-fb:96 goog-remb
a=rtcp-fb:96 ccm fir
a=rtcp-fb:96 nack
a=rtcp-fb:96 nack pli
a=rtcp-fb:96 nack
a=rtcp-fb:96 nack pli
a=rtcp-fb:96 transport-cc
a=rtpmap:97 rtx/90000
a=fmtp:97 apt=96
a=rtcp-fb:97 nack
a=rtcp-fb:97 nack pli
a=rtcp-fb:97 transport-cc
a=rtpmap:98 VP9/90000
a=fmtp:98 profile-id=0
a=rtcp-fb:98 goog-remb
a=rtcp-fb:98 ccm fir
a=rtcp-fb:98 nack
a=rtcp-fb:98 nack pli
a=rtcp-fb:98 nack
a=rtcp-fb:98 nack pli
a=rtcp-fb:98 transport-cc
a=rtpmap:99 rtx/90000
a=fmtp:99 apt=98
a=rtcp-fb:99 nack
a=rtcp-fb:99 nack pli
a=rtcp-fb:99 transport-cc
a=rtpmap:100 VP9/90000
a=fmtp:100 profile-id=1
a=rtcp-fb:100 goog-remb
a=rtcp-fb:100 ccm fir
a=rtcp-fb:100 nack
a=rtcp-fb:100 nack pli
a=rtcp-fb:100 nack
a=rtcp-fb:100 nack pli
a=rtcp-fb:100 transport-cc
a=rtpmap:101 rtx/90000
a=fmtp:101 apt=100
a=rtcp-fb:101 nack
a=rtcp-fb:101 nack pli
a=rtcp-fb:101 transport-cc
a=rtpmap:102 H264/90000
a=fmtp:102 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42001f
a=rtcp-fb:102 goog-remb
a=rtcp-fb:102 ccm fir
a=rtcp-fb:102 nack
a=rtcp-fb:102 nack pli
a=rtcp-fb:102 nack
a=rtcp-fb:102 nack pli
a=rtcp-fb:102 transport-cc
a=rtpmap:121 rtx/90000
a=fmtp:121 apt=102
a=rtcp-fb:121 nack
a=rtcp-fb:121 nack pli
a=rtcp-fb:121 transport-cc
a=rtpmap:127 H264/90000
a=fmtp:127 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f
a=rtcp-fb:127 goog-remb
a=rtcp-fb:127 ccm fir
a=rtcp-fb:127 nack
a=rtcp-fb:127 nack pli
a=rtcp-fb:127 nack
a=rtcp-fb:127 nack pli
a=rtcp-fb:127 transport-cc
a=rtpmap:120 rtx/90000
a=fmtp:120 apt=127
a=rtcp-fb:120 nack
a=rtcp-fb:120 nack pli
a=rtcp-fb:120 transport-cc
a=rtpmap:125 H264/90000
a=fmtp:125 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f
a=rtcp-fb:125 goog-remb
a=rtcp-fb:125 ccm fir
a=rtcp-fb:125 nack
a=rtcp-fb:125 nack pli
a=rtcp-fb:125 nack
a=rtcp-fb:125 nack pli
a=rtcp-fb:125 transport-cc
a=rtpmap:107 rtx/90000
a=fmtp:107 apt=125
a=rtcp-fb:107 nack
a=rtcp-fb:107 nack pli
a=rtcp-fb:107 transport-cc
a=rtpmap:108 H264/90000
a=fmtp:108 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42e01f
a=rtcp-fb:108 goog-remb
a=rtcp-fb:108 ccm fir
a=rtcp-fb:108 nack
a=rtcp-fb:108 nack pli
a=rtcp-fb:108 nack
a=rtcp-fb:108 nack pli
a=rtcp-fb:108 transport-cc
a=rtpmap:109 rtx/90000
a=fmtp:109 apt=108
a=rtcp-fb:109 nack
a=rtcp-fb:109 nack pli
a=rtcp-fb:109 transport-cc
a=rtpmap:123 H264/90000
a=fmtp:123 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=640032
a=rtcp-fb:123 goog-remb
a=rtcp-fb:123 ccm fir
a=rtcp-fb:123 nack
a=rtcp-fb:123 nack pli
a=rtcp-fb:123 nack
a=rtcp-fb:123 nack pli
a=rtcp-fb:123 transport-cc
a=rtpmap:118 rtx/90000
a=fmtp:118 apt=123
a=rtcp-fb:118 nack
a=rtcp-fb:118 nack pli
a=rtcp-fb:118 transport-cc
a=rtpmap:116 ulpfec/90000
a=rtcp-fb:116 nack
a=rtcp-fb:116 nack pli
a=rtcp-fb:116 transport-cc
a=extmap:1 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01
a=ssrc:3980097584 cname:video_0
a=ssrc:3980097584 msid:video_0 video
a=ssrc:3980097584 mslabel:video_0
a=ssrc:3980097584 label:video
a=msid:video_0 video
a=sendrecv
a=candidate:167090039 1 udp 2130706431 :: 48818 typ host
a=candidate:167090039 2 udp 2130706431 :: 48818 typ host
a=candidate:2938512866 1 udp 2130706431 192.168.1.100 47953 typ host
a=candidate:2938512866 2 udp 2130706431 192.168.1.100 47953 typ host
a=candidate:2414835526 1 udp 1694498815 72.196.215.130 35989 typ srflx raddr 0.0.0.0 rport 35989
a=candidate:2414835526 2 udp 1694498815 72.196.215.130 35989 typ srflx raddr 0.0.0.0 rport 35989
a=candidate:2414835526 1 udp 1694498815 72.196.215.130 37580 typ srflx raddr 0.0.0.0 rport 37580
a=candidate:2414835526 2 udp 1694498815 72.196.215.130 37580 typ srflx raddr 0.0.0.0 rport 37580
a=candidate:2414835526 1 udp 1694498815 72.196.215.130 59238 typ srflx raddr 0.0.0.0 rport 59238
a=candidate:2414835526 2 udp 1694498815 72.196.215.130 59238 typ srflx raddr 0.0.0.0 rport 59238
a=candidate:2414835526 1 udp 1694498815 72.196.215.130 53377 typ srflx raddr 0.0.0.0 rport 53377
a=candidate:2414835526 2 udp 1694498815 72.196.215.130 53377 typ srflx raddr 0.0.0.0 rport 53377
a=candidate:1022905401 1 udp 16777215 34.203.251.215 29290 typ relay raddr 0.0.0.0 rport 38594
a=candidate:1022905401 2 udp 16777215 34.203.251.215 29290 typ relay raddr 0.0.0.0 rport 38594
a=end-of-candidates
m=audio 9 UDP/TLS/RTP/SAVPF 111 9 0 8
c=IN IP4 0.0.0.0
a=setup:actpass
a=mid:1
a=ice-ufrag:cWRCBPTiuOohkLsf
a=ice-pwd:mHMqXcRexKOkbHKAZlvxjgvLFtdHiZAL
a=rtcp-mux
a=rtcp-rsize
a=rtpmap:111 opus/48000/2
a=fmtp:111 minptime=10;useinbandfec=1
a=rtcp-fb:111 transport-cc
a=rtpmap:9 G722/8000
a=rtcp-fb:9 transport-cc
a=rtpmap:0 PCMU/8000
a=rtcp-fb:0 transport-cc
a=rtpmap:8 PCMA/8000
a=rtcp-fb:8 transport-cc
a=extmap:1 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01
a=ssrc:597106938 cname:audio_0
a=ssrc:597106938 msid:audio_0 audio
a=ssrc:597106938 mslabel:audio_0
a=ssrc:597106938 label:audio
a=msid:audio_0 audio
a=sendrecv
Generated remote SDP:
v=0
o=- 1420070400000 0 IN IP4 127.0.0.1
s=-
t=0 0
a=msid-semantic: WMS *
a=group:BUNDLE 0 1
m=video 50016 UDP/TLS/RTP/SAVPF 98 97
c=IN IP4 66.22.231.190/127/1
a=fingerprint:sha-256 4A:79:94:16:44:3F:BD:05:41:5A:C7:20:F3:12:54:70:00:73:5D:33:00:2D:2C:80:9B:39:E1:9F:2D:A7:49:87
a=ice-ufrag:PkLE
a=ice-pwd:o9QGn2N6YizFOM/UNojYai
a=rtcp-mux
a=rtcp:50016
a=setup:passive
a=inactive
a=rtcp-fb:98 ccm fir
a=rtcp-fb:98 nack
a=rtcp-fb:98 nack pli
a=rtcp-fb:98 goog-remb
a=rtcp-fb:98 transport-cc
a=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
a=extmap:3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01
a=extmap:14 urn:ietf:params:rtp-hdrext:toffset
a=extmap:13 urn:3gpp:video-orientation
a=extmap:5 http://www.webrtc.org/experiments/rtp-hdrext/playout-delay
a=fmtp:98 x-google-max-bitrate=2500;level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f
a=fmtp:97 apt=98
a=mid:0
a=rtpmap:98 VP9/90000
a=rtpmap:97 rtx/90000
a=candidate:1 1 UDP 4261412862 66.22.231.190 50016 typ host
a=end-of-candidates
m=audio 50016 UDP/TLS/RTP/SAVPF 111
c=IN IP4 66.22.231.190/127/1
a=fingerprint:sha-256 4A:79:94:16:44:3F:BD:05:41:5A:C7:20:F3:12:54:70:00:73:5D:33:00:2D:2C:80:9B:39:E1:9F:2D:A7:49:87
a=ice-ufrag:PkLE
a=ice-pwd:o9QGn2N6YizFOM/UNojYai
a=rtcp-mux
a=rtcp:50016
a=setup:passive
a=inactive
a=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level
a=extmap:3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01
a=fmtp:111 minptime=10;useinbandfec=1;usedtx=1
a=maxptime:60
a=rtpmap:111 opus/90000
a=rtcp-fb:111 transport-cc
a=mid:1
After setting the remote answer SDP, the ICE connection state changes to "checking", but after about 20 seconds it changes to "failed" and the connection is killed. Is there something wrong with my SDPs or my code?
Edit: I got logging working, here is the connection log:
https://pastebin.com/vNvd3Af6
Edit 2: I'm not receiving any inbound traffic from the STUN servers. Other programs using the same ICE servers work fine, so what could I be doing wrong outside of basic network configuration?
Edit 3: Here is a working ICE connection capture and here is the one I'm currently dealing with.
Edit 4: I ran netstat to see what ports my code is listening on, and there are some differences. I cut out all other programs.
Here is the working ICE connection:
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 192.168.1.100:41383 0.0.0.0:* LISTEN 37973/target/debug/
tcp 0 0 192.168.1.100:51469 0.0.0.0:* LISTEN 37973/target/debug/
tcp6 0 0 fe80::60d2:bcaa:a:40899 :::* LISTEN 37973/target/debug/
udp 0 0 192.168.1.100:44897 0.0.0.0:* 37973/target/debug/
udp 0 0 239.255.255.250:1900 0.0.0.0:* 37973/target/debug/
udp 0 0 192.168.1.100:1900 0.0.0.0:* 37973/target/debug/
udp 0 0 239.255.255.250:1900 0.0.0.0:* 37973/target/debug/
udp 0 0 127.0.0.1:1900 0.0.0.0:* 37973/target/debug/
udp 0 0 127.0.0.1:37386 0.0.0.0:* 37973/target/debug/
udp 0 0 192.168.1.100:59877 0.0.0.0:* 37973/target/debug/
udp6 0 0 fe80::60d2:bcaa:a:56003 :::* 37973/target/debug/
Here is the non-working connection:
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
udp 0 0 0.0.0.0:50651 0.0.0.0:* 37186/target/debug/
udp 0 0 0.0.0.0:51996 0.0.0.0:* 37186/target/debug/
udp 0 0 0.0.0.0:35776 0.0.0.0:* 37186/target/debug/
udp 0 0 0.0.0.0:53036 0.0.0.0:* 37186/target/debug/
udp 0 0 224.0.0.251:5353 0.0.0.0:* 37186/target/debug/
udp 0 0 0.0.0.0:40115 0.0.0.0:* 37186/target/debug/
udp 0 0 192.168.1.100:40707 0.0.0.0:* 37186/target/debug/
udp6 0 0 :::37965 :::* 37186/target/debug/

The server I was communicating with only accepted LF line endings, while my new implementation was sending CRLF. Switching to LF fixed the issue.
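For anyone hitting the same problem: RFC 4566 specifies CRLF line endings for SDP records (parsers are merely encouraged to tolerate bare LF), so some endpoints only accept one or the other. A minimal sketch of the normalization, with an illustrative function name:

// Convert an SDP string from CRLF to LF line endings before sending it
// over the signaling channel.
fn normalize_sdp_line_endings(sdp: &str) -> String {
    sdp.replace("\r\n", "\n")
}

// e.g. before handing the SDP to the websocket task:
// let local_sdp = normalize_sdp_line_endings(&local.to_string());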

Related

Answer SDP Error Kurento NodeJS server with vanilla webRTC client

I've been developing an application to record a session from the browser using the Kurento Media Server. I've set up the backend using the kurento-client library and used the vanilla WebRTC API on the front end.
The offer is generated at the client side and the answer is returned back from the server.
The issue arises during the setRemoteDescription method at the client side.
I'm getting the error shown below:
Uncaught (in promise) TypeError: Failed to execute 'setRemoteDescription' on 'RTCPeerConnection': The provided value is not of type 'RTCSessionDescriptionInit'.
Console-logging the received SDP yields the following value:
v=0
o=- 3858259838 3858259838 IN IP4 0.0.0.0
s=Kurento Media Server
c=IN IP4 0.0.0.0
t=0 0
a=extmap-allow-mixed:
a=msid-semantic: WMS EkyCjRfgsyNQMlKh9vmRFVBIlCgbgNt51tst
a=group:BUNDLE 0 1
m=audio 1 UDP/TLS/RTP/SAVPF 111 0
a=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
a=sendrecv
a=mid:0
a=rtcp:9 IN IP4 0.0.0.0
a=rtpmap:111 opus/48000/2
a=rtpmap:0 PCMU/8000
a=setup:active
a=rtcp-mux
a=fmtp:111 minptime=10;useinbandfec=1
a=ssrc:2441312662 cname:user4258135824#host-a3760c98
a=ice-ufrag:u7MJ
a=ice-pwd:uAgK+b4b5eK2333Z+qQZnP
a=fingerprint:sha-256 BD:6C:C8:40:7C:30:60:30:76:63:CC:28:20:D3:81:5F:EE:5A:6D:B0:C4:AA:09:37:70:8E:13:55:51:81:4B:37
m=video 1 UDP/TLS/RTP/SAVPF 96 127 125 108 124 123 35
a=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
a=sendrecv
a=mid:1
a=rtcp:9 IN IP4 0.0.0.0
a=rtpmap:96 VP8/90000
a=rtpmap:127 H264/90000
a=rtpmap:125 H264/90000
a=rtpmap:108 H264/90000
a=rtpmap:124 H264/90000
a=rtpmap:123 H264/90000
a=rtpmap:35 H264/90000
a=rtcp-fb:96 goog-remb
a=rtcp-fb:96 ccm fir
a=rtcp-fb:96 nack
a=rtcp-fb:96 nack pli
a=rtcp-fb:127 goog-remb
a=rtcp-fb:127 ccm fir
a=rtcp-fb:127 nack
a=rtcp-fb:127 nack pli
a=rtcp-fb:125 goog-remb
a=rtcp-fb:125 ccm fir
a=rtcp-fb:125 nack
a=rtcp-fb:125 nack pli
a=rtcp-fb:108 goog-remb
a=rtcp-fb:108 ccm fir
a=rtcp-fb:108 nack
a=rtcp-fb:108 nack pli
a=rtcp-fb:124 goog-remb
a=rtcp-fb:124 ccm fir
a=rtcp-fb:124 nack
a=rtcp-fb:124 nack pli
a=rtcp-fb:123 goog-remb
a=rtcp-fb:123 ccm fir
a=rtcp-fb:123 nack
a=rtcp-fb:123 nack pli
a=rtcp-fb:35 goog-remb
a=rtcp-fb:35 ccm fir
a=rtcp-fb:35 nack
a=rtcp-fb:35 nack pli
a=setup:active
a=rtcp-mux
a=fmtp:127 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42001f
a=fmtp:125 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f
a=fmtp:108 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f
a=fmtp:124 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42e01f
a=fmtp:123 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=4d001f
a=fmtp:35 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=4d001f
a=ssrc:3201711112 cname:user4258135824#host-a3760c98
a=ice-ufrag:u7MJ
a=ice-pwd:uAgK+b4b5eK2333Z+qQZnP
a=fingerprint:sha-256 BD:6C:C8:40:7C:30:60:30:76:63:CC:28:20:D3:81:5F:EE:5A:6D:B0:C4:AA:09:37:70:8E:13:55:51:81:4B:37
I've provided the code segment below which throws the error.
socket.on("credentials", async ({ sdp, ice }) => {
    try {
        if (sdp) {
            console.log(sdp)
            await pc.setRemoteDescription(sdp)
            console.log('set')
        }
        else if (ice) {
            await pc.addIceCandidate(ice)
            console.log('ice set')
        }
    }
    catch (error) {
        console.log(error)
    }
})
Is this not the correct format for the answer SDP? Some help to solve this issue is much appreciated.
You have a problem with your terminology. The SDP itself is the string starting with v=....
However, setRemoteDescription expects an RTCSessionDescriptionInit argument, i.e. an object of the shape {type, sdp}, so the call should look like pc.setRemoteDescription({ type: 'answer', sdp }).
That type (typically offer or answer) should also come from your signaling.

Why does shutdown write in the client cause the connection to be closed?

Note: no error appears if the shutdown in the client is removed.
// server.rs
use std::io::BufReader;
use std::io::Read;
use std::io::Write;
use std::net::Shutdown;
use std::net::TcpListener;
use std::thread;

fn main() {
    let listener = TcpListener::bind("127.0.0.1:4000").unwrap();
    for stream in listener.incoming() {
        let mut stream = stream.unwrap();
        thread::spawn(move || {
            let mut reader = BufReader::new(&stream);
            let mut buffer = [0; 1024];
            let len = reader.read(&mut buffer).unwrap();
            // no sleep, no error; just simulating a time-consuming operation
            thread::sleep(std::time::Duration::from_secs(1));
            stream.write_all(&buffer[0..len]).unwrap();
            stream.shutdown(Shutdown::Write).unwrap();
        });
    }
}
// client.rs
use std::io::{Read, Write};
use std::net::TcpStream;
use std::thread;

fn main() {
    let mut clients = Vec::new();
    for _ in 0..1000 {
        clients.push(thread::spawn(move || {
            let mut client = TcpStream::connect("127.0.0.1:4000").unwrap();
            client.write_all("hello".as_bytes()).unwrap();
            // no error if the following line is removed
            client.shutdown(std::net::Shutdown::Write).unwrap();
            let mut buffer = Vec::new();
            client.read_to_end(&mut buffer).unwrap();
            println!("{}", std::str::from_utf8(&buffer).unwrap());
        }));
    }
    for client in clients.into_iter() {
        client.join().unwrap();
    }
}
As I understand it, shutting down the write half appends a FIN after the previously sent data, and the peer (server) can still continue to write data. But among these 1000 clients, some errors appeared:
// server
<unnamed>' panicked at 'called `Result::unwrap()` on an `Err` value: Os { code: 107, kind: NotConnected, message: "Transport endpoint is not connected" }', src/bin/server.rs:22:46
// client
thread '<unnamed>' panicked at 'called `Result::unwrap()` on an `Err` value: Os { code: 104, kind: ConnectionReset, message: "Connection reset by peer" }', src/bin/client.rs:15:45
It seems that the connection is closed after the shutdown in the client.
Update1:
I used Wireshark; this is one of the failing connections:
No. Time Source Destination Protocol Length Info
1101 13.738139 127.0.0.1 127.0.0.1 TCP 56 10628 → 4000 [SYN] Seq=0 Win=65535 Len=0 MSS=65495 WS=256 SACK_PERM=1
1104 13.738157 127.0.0.1 127.0.0.1 TCP 44 4000 → 10628 [RST, ACK] Seq=409345761 Ack=1 Win=0 Len=0
1234 14.251615 127.0.0.1 127.0.0.1 TCP 56 [TCP Retransmission] [TCP Port numbers reused] 10628 → 4000 [SYN] Seq=0 Win=65535 Len=0 MSS=65495 WS=256 SACK_PERM=1
1250 14.251690 127.0.0.1 127.0.0.1 TCP 56 [TCP Port numbers reused] 4000 → 10628 [SYN, ACK] Seq=0 Ack=1 Win=65535 Len=0 MSS=65495 WS=256 SACK_PERM=1
1266 14.251726 127.0.0.1 127.0.0.1 TCP 44 10628 → 4000 [ACK] Seq=1 Ack=1 Win=2161152 Len=0
1376 14.251949 127.0.0.1 127.0.0.1 TCP 49 10628 → 4000 [PSH, ACK] Seq=1 Ack=1 Win=2161152 Len=5
1387 14.251970 127.0.0.1 127.0.0.1 TCP 44 4000 → 10628 [ACK] Seq=1 Ack=6 Win=2161152 Len=0
1402 14.251996 127.0.0.1 127.0.0.1 TCP 44 10628 → 4000 [FIN, ACK] Seq=6 Ack=1 Win=2161152 Len=0
1412 14.252013 127.0.0.1 127.0.0.1 TCP 44 4000 → 10628 [ACK] Seq=1 Ack=7 Win=2161152 Len=0
2092 15.261312 127.0.0.1 127.0.0.1 TCP 49 4000 → 10628 [PSH, ACK] Seq=1 Ack=7 Win=2161152 Len=5
2101 15.261384 127.0.0.1 127.0.0.1 TCP 44 10628 → 4000 [RST, ACK] Seq=7 Ack=6 Win=0 Len=0
Update2:
One of the correct connections:
No. Time Source Destination Protocol Length Info
162 13.731960 127.0.0.1 127.0.0.1 TCP 56 10927 → 4000 [SYN] Seq=0 Win=65535 Len=0 MSS=65495 WS=256 SACK_PERM=1
166 13.731997 127.0.0.1 127.0.0.1 TCP 56 4000 → 10927 [SYN, ACK] Seq=0 Ack=1 Win=65535 Len=0 MSS=65495 WS=256 SACK_PERM=1
169 13.732013 127.0.0.1 127.0.0.1 TCP 44 10927 → 4000 [ACK] Seq=1 Ack=1 Win=2161152 Len=0
176 13.732035 127.0.0.1 127.0.0.1 TCP 49 10927 → 4000 [PSH, ACK] Seq=1 Ack=1 Win=2161152 Len=5
181 13.732046 127.0.0.1 127.0.0.1 TCP 44 4000 → 10927 [ACK] Seq=1 Ack=6 Win=2161152 Len=0
187 13.732059 127.0.0.1 127.0.0.1 TCP 44 10927 → 4000 [FIN, ACK] Seq=6 Ack=1 Win=2161152 Len=0
191 13.732074 127.0.0.1 127.0.0.1 TCP 44 4000 → 10927 [ACK] Seq=1 Ack=7 Win=2161152 Len=0
1495 14.746260 127.0.0.1 127.0.0.1 TCP 49 4000 → 10927 [PSH, ACK] Seq=1 Ack=7 Win=2161152 Len=5
1502 14.746369 127.0.0.1 127.0.0.1 TCP 44 10927 → 4000 [ACK] Seq=7 Ack=6 Win=2161152 Len=0
1505 14.746423 127.0.0.1 127.0.0.1 TCP 44 4000 → 10927 [FIN, ACK] Seq=6 Ack=7 Win=2161152 Len=0
1512 14.746529 127.0.0.1 127.0.0.1 TCP 44 10927 → 4000 [ACK] Seq=7 Ack=7 Win=2161152 Len=0
I strongly suspect it has to do with the backlog, the number of connections accepted by the OS TCP stack, but not yet handled by your application.
std doesn't allow you to control this number and defaults it to 128.
To control the number, I've re-implemented your server in tokio (well, it's actually just the main example on the tokio README), and set the backlog to 3000.
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpSocket};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let addr = "127.0.0.1:4000".parse().unwrap();
    let socket = TcpSocket::new_v4()?;
    socket.bind(addr)?;
    let listener: TcpListener = socket.listen(3000)?;
    loop {
        let (mut socket, _) = listener.accept().await?;
        tokio::spawn(async move {
            let mut buf = [0; 1024];
            loop {
                let n = match socket.read(&mut buf).await {
                    Ok(n) if n == 0 => return,
                    Ok(n) => n,
                    Err(e) => {
                        eprintln!("failed to read from socket; err = {:?}", e);
                        return;
                    }
                };
                tokio::time::sleep(Duration::from_secs(1)).await;
                if let Err(e) = socket.write_all(&buf[0..n]).await {
                    eprintln!("failed to write to socket; err = {:?}", e);
                    return;
                }
            }
        });
    }
}
This makes the problem disappear. Reducing it to socket.listen(128) makes it reappear. (Disclaimer: I do not suggest that 3000 is a sane number for the backlog.)
I said I strongly suspect that this is the cause because I can't fully explain how the sleep causes the problem. It may be that the many sleeping threads slow down the scheduler, and thus the speed at which your server can accept connections. But that is speculation.
(Side note: My default ulimit for open files was fairly low. I had to increase it with ulimit -nS 10000 to not get in the way when testing this.)
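For completeness: std's TcpListener::bind hardcodes the backlog, but it can be set without tokio as well. A minimal sketch using the socket2 crate (socket2 is my suggestion here, not something the original code used):

use std::net::{SocketAddr, TcpListener};
use socket2::{Domain, Socket, Type};

// Build a std TcpListener with an explicit backlog, which
// std's TcpListener::bind does not let you control.
fn listen_with_backlog(addr: SocketAddr, backlog: i32) -> std::io::Result<TcpListener> {
    let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
    socket.bind(&addr.into())?;
    socket.listen(backlog)?;
    Ok(socket.into())
}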

Google cloud engine external access issue

I'm new to Google Cloud Platform and I don't understand why I can't reach a Node.js instance running on a new VM.
Node is running on port 8084 through app.listen('8084', "0.0.0.0")
Firewall rules are the following:
gcloud compute firewall-rules list
NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED
default-allow-http default INGRESS 1000 tcp:80 False
default-allow-https default INGRESS 1000 tcp:443 False
default-allow-icmp default INGRESS 65534 icmp False
default-allow-internal default INGRESS 65534 tcp:0-65535,udp:0-65535,icmp False
default-allow-rdp default INGRESS 65534 tcp:3389 False
default-allow-ssh default INGRESS 65534 tcp:22 False
node-8084 default INGRESS 999 tcp:8084 False
netstat:
netstat -na | grep LISTEN
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:8084 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:34339 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:8998 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:65001 0.0.0.0:* LISTEN
tcp6 0 0 :::970 :::* LISTEN
tcp6 0 0 :::980 :::* LISTEN
tcp6 0 0 :::22 :::* LISTEN
tcp6 0 0 :::922 :::* LISTEN
I can reach the VM via SSH (port 22) but not through http://35.206.91.238:8084/medical on Chrome. 35.206.91.238 is the external IP showed by google cloud console.
Node.js presents no errors (and no requests).
The main code is:
var app = express();
app.get('/medical', function(request, response) {
    if (request.query.q)
        run(request.query.q, function(results, queries) { parseResult(q, results, queries, response) });
})
app.listen('8084', '0.0.0.0')
console.log('Server started on port 8084');
exports = module.exports = app;
Wireshark shows only SYN packets arriving; Chrome says "35.206.91.238 refused to connect."
Node is executed manually from a shell and doesn't report any errors after "Server started on port 8084".
Any ideas?

Nginx docker as nodejs proxy in local

At the moment I am using nginx in Docker to proxy my Node applications.
For example, I add website.dev to my hosts file, and my current nginx config is:
worker_processes 4;
pid /run/nginx.pid;
events {}
http {
    server {
        listen 80;
        server_name website.dev;
        location / {
            proxy_pass http://localnode:3000;
            proxy_set_header X-Base-Path "/";
            proxy_set_header Website-Name "test";
        }
    }
}
And I start my Docker container with this command:
docker run --name infra-nginx --add-host localnode:$(ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}') -p 80:80 -d docker-registry.host.com:5000/infra-nginx:dev
where
$(ifconfig | grep inet | grep -v inet6 | grep -v 127.0.0.1 | awk '{print $2}') fetches my local IP, which doesn't work all the time.
The problem is that if I want to work without internet, I can't.
And when my IP changes, I have to restart the container with the new IP.
I tried this config instead:
worker_processes 4;
pid /run/nginx.pid;
events {}
http {
    server {
        listen 80;
        server_name localhost;
        location / {
            return 200 'gangnam style!';
        }
    }
    server {
        listen 80;
        server_name website.dev;
        location / {
            proxy_pass http://localhost:3000;
            proxy_set_header X-Base-Path "/";
            proxy_set_header Website-Name "test";
        }
    }
}
And running
docker run --name infra-nginx --network host -d docker-registry.host.com:5000/infra-nginx:dev
In this case, when I am running:
curl http://website.dev/
I get:
curl: (7) Failed to connect to website.dev port 80: Connection refused
docker ps gives:
81da561dd131 ajouve/infra-nginx:dev "nginx -g 'daemon ..." 32 minutes ago Up 32 minutes infra-nginx
netstat -plant gives me:
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 1/nginx: master pro
tcp 0 0 172.17.0.1:35962 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35938 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35994 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:45080 172.17.0.5:6379 ESTABLISHED -
tcp 0 0 172.17.0.1:35990 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 192.168.65.2:44900 151.101.0.204:80 TIME_WAIT -
tcp 0 0 172.17.0.1:45126 172.17.0.5:6379 ESTABLISHED -
tcp 0 0 172.17.0.1:36000 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35958 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:59172 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:35976 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:59106 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:35980 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35996 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:58356 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35966 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:45112 172.17.0.5:6379 ESTABLISHED -
tcp 0 0 172.17.0.1:35932 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:58366 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35998 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 192.168.65.2:41386 206.251.255.63:80 TIME_WAIT -
tcp 0 0 172.17.0.1:58358 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35956 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35924 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:36004 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:58360 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35964 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35916 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:58362 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:59148 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:59166 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:35944 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35912 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35954 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:45116 172.17.0.5:6379 ESTABLISHED -
tcp 0 0 172.17.0.1:58354 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35988 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:59122 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 192.168.65.2:34936 5.153.231.4:80 TIME_WAIT -
tcp 0 0 192.168.65.2:44904 151.101.0.204:80 TIME_WAIT -
tcp 0 0 172.17.0.1:59162 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:59180 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:45130 172.17.0.5:6379 ESTABLISHED -
tcp 0 0 172.17.0.1:59140 172.17.0.3:5672 ESTABLISHED -
tcp 0 0 172.17.0.1:36002 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35922 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:35970 172.17.0.4:27017 ESTABLISHED -
tcp 0 0 172.17.0.1:58364 172.17.0.4:27017 ESTABLISHED -
tcp6 0 0 :::6379 :::* LISTEN -
tcp6 0 0 :::15672 :::* LISTEN -
tcp6 0 0 :::5672 :::* LISTEN -
tcp6 0 0 :::27017 :::* LISTEN -
From the docker container:
curl -v localhost
gives:
* Rebuilt URL to: localhost/
* Hostname was NOT found in DNS cache
* Trying ::1...
* connect to ::1 port 80 failed: Connection refused
* Trying 127.0.0.1...
* Connected to localhost (127.0.0.1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.38.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
* Server nginx/1.10.3 is not blacklisted
< Server: nginx/1.10.3
< Date: Fri, 29 Sep 2017 08:46:10 GMT
< Content-Type: text/plain
< Content-Length: 14
< Connection: keep-alive
<
* Connection #0 to host localhost left intact
gangnam style!
But curl -v website.dev is returning
* Rebuilt URL to: website.dev/
* Hostname was NOT found in DNS cache
* Trying 127.0.0.1...
* Connected to website.dev (127.0.0.1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.38.0
> Host: website.dev
> Accept: */*
>
< HTTP/1.1 502 Bad Gateway
* Server nginx/1.10.3 is not blacklisted
< Server: nginx/1.10.3
< Date: Fri, 29 Sep 2017 08:46:37 GMT
< Content-Type: text/html
< Content-Length: 173
< Connection: keep-alive
<
<html>
<head><title>502 Bad Gateway</title></head>
<body bgcolor="white">
<center><h1>502 Bad Gateway</h1></center>
<hr><center>nginx/1.10.3</center>
</body>
</html>
* Connection #0 to host website.dev left intact
And docker inspect infra-nginx
[
{
"Id": "16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e",
"Created": "2017-09-29T08:31:21.144827953Z",
"Path": "nginx",
"Args": [
"-g",
"daemon off;"
],
"State": {
"Status": "running",
"Running": true,
"Paused": false,
"Restarting": false,
"OOMKilled": false,
"Dead": false,
"Pid": 2656,
"ExitCode": 0,
"Error": "",
"StartedAt": "2017-09-29T08:31:21.548119911Z",
"FinishedAt": "0001-01-01T00:00:00Z"
},
"Image": "sha256:25d085baee52923e32f8d134048238fb67e71173e01f758c391119235f7fc565",
"ResolvConfPath": "/var/lib/docker/containers/16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e/hostname",
"HostsPath": "/var/lib/docker/containers/16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e/hosts",
"LogPath": "/var/lib/docker/containers/16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e/16941d22442a257f0874a772df935514c658ac16ec67eb3f65606b4d7c0ee62e-json.log",
"Name": "/infra-nginx",
"RestartCount": 0,
"Driver": "aufs",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": null,
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "host",
"PortBindings": {},
"RestartPolicy": {
"Name": "no",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": null,
"CapAdd": null,
"CapDrop": null,
"Dns": [],
"DnsOptions": [],
"DnsSearch": [],
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": null,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": [],
"DeviceCgroupRules": null,
"DiskQuota": 0,
"KernelMemory": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": -1,
"OomKillDisable": false,
"PidsLimit": 0,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0
},
"GraphDriver": {
"Data": null,
"Name": "aufs"
},
"Mounts": [],
"Config": {
"Hostname": "moby",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"NGINX_VERSION=1.10.3-1~jessie"
],
"Cmd": [
"nginx",
"-g",
"daemon off;"
],
"ArgsEscaped": true,
"Image": "ajouve/infra-nginx:dev",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": {}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "175272649c9a9c5abbfde7516328bdab5cb3825e1e027eee0580eb18f7ff77cb",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {},
"SandboxKey": "/var/run/docker/netns/default",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"host": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "3e04a4c12b5a5b3f55e7b4e918dadec64806b5c926fc249e8aa3e28398a02954",
"EndpointID": "7de54daaa31230c9492a463792015af727e9562eaacbaa0c2d70cdc3d3b04236",
"Gateway": "",
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "",
"DriverOpts": null
}
}
}
}
]
When you run it using --net host on Docker for Mac, the container still isn't actually on the Mac's host network. That is why it is not working.
From the documentation:
The Mac has a changing IP address (or none if you have no network access). From 17.06 onwards our recommendation is to connect to the special Mac-only DNS name docker.for.mac.localhost which will resolve to the internal IP address used by the host.
So you need to change your config to
worker_processes 4;
pid /run/nginx.pid;
events {}
http {
    server {
        listen 80;
        server_name localhost;
        location / {
            return 200 'gangnam style!';
        }
    }
    server {
        listen 80;
        server_name website.dev;
        location / {
            proxy_pass http://docker.for.mac.localhost:3000;
            proxy_set_header X-Base-Path "/";
            proxy_set_header Website-Name "test";
        }
    }
}
And you should run the container as below
docker run --name infra-nginx -p 80:80 -d docker-registry.host.com:5000/infra-nginx:dev
You're making it way too complex. Just EXPOSE a port like 8080 from your app container.
In nginx use:
listen 80;
server_name _;
location / {
    proxy_pass http://127.0.0.2:8080;
}
OK, I think the solution I used is simple enough.
First, I have a Node.js app running on port 3000 on my local machine, so if I go to localhost:3000 in the browser I can see the app running.
Then I create a file called default.conf inside a folder on my machine, ~/projects/docker/default.conf (you can create that file wherever you want), and paste this code into it:
server {
    listen 80;
    server_name myapp.com;
    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
Note the line proxy_pass http://localhost:3000; it redirects to my app running on my local machine.
Then I run an nginx container with the following command:
sudo docker run -d -p 80:80 --name="nginx" --net="host" -v ~/projects/docker/default.conf:/etc/nginx/conf.d/default.conf:ro nginx
The -p 80:80 binds that port to the nginx container.
The --net="host" tells the container to use the same network as the host, i.e. the same as my machine; with this I can forward to localhost:3000 from inside the container.
The -v ~/projects/docker/default.conf:/etc/nginx/conf.d/default.conf:ro tells nginx that its configuration file is the one on my local machine. It means something like: you will find this /etc/nginx/conf.d/default.conf file at this ~/projects/docker/default.conf location.
And that's all.
If I go to my browser and type just localhost (without the port), the request goes through the nginx container and is redirected to the app running on localhost:3000 on my local machine.
Let me know if this helps you.

tcp connect fails randomly under high load

Our application uses non-blocking sockets with connect and select operations (C code). The pseudocode is below:
unsigned int ConnectToServer(struct sockaddr_in *pSelfAddr, struct sockaddr_in *pDestAddr)
{
    int sktConnect = -1;
    sktConnect = socket(AF_INET, SOCK_STREAM, 0);
    if(sktConnect == INVALID_SOCKET)
        return -1;
    fcntl(sktConnect, F_SETFL, fcntl(sktConnect, F_GETFL) | O_NONBLOCK);
    if(pSelfAddr != 0)
    {
        if(bind(sktConnect, (const struct sockaddr*)(void *)pSelfAddr, sizeof(*pSelfAddr)) != 0)
        {
            closesocket(sktConnect);
            return -1;
        }
    }
    errno = 0;
    int nRc = connect(sktConnect, (const struct sockaddr*)(void *)pDestAddr, sizeof(*pDestAddr));
    if(nRc != -1)
    {
        return sktConnect;
    }
    if(errno != EINPROGRESS)
    {
        int savedError = errno;
        closesocket(sktConnect);
        return -1;
    }
    fd_set scanSet;
    FD_ZERO(&scanSet);
    FD_SET(sktConnect, &scanSet);
    struct timeval waitTime;
    waitTime.tv_sec = 2;
    waitTime.tv_usec = 0;
    int tmp;
    tmp = select(sktConnect + 1, (fd_set*)0, &scanSet, (fd_set*)0, &waitTime);
    if(tmp == -1 || !FD_ISSET(sktConnect, &scanSet))
    {
        int savedErrorNo = errno;
        writeLog("Connect %s failed after select, cause %d, error %s", inet_ntoa(pDestAddr->sin_addr), savedErrorNo, strerror(savedErrorNo));
        closesocket(sktConnect);
        return -1;
    }
    . . . . .
}
There are 80 such nodes, and the application connects to all of its peers in round-robin fashion.
In this phase, some of the nodes are unable to connect (via connect + select), with error number 115 (EINPROGRESS).
In the tcpdump logs below for the success scenario we can see SYN, SYN+ACK, and ACK, but not even a SYN shows up for the failed node.
The tcpdump logs are:
387937 2012-07-05 07:45:30.646514 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [SYN] Seq=0 Ack=0 Win=5792 Len=0 MSS=1460 TSV=1414450402 TSER=912308224 WS=8
387947 2012-07-05 07:45:30.780762 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [SYN, ACK] Seq=0 Ack=1 Win=5792 Len=0 MSS=1460 TSV=912309754 TSER=1414450402 WS=8
387948 2012-07-05 07:45:30.780773 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [ACK] Seq=1 Ack=1 Win=5888 Len=0 TSV=1414450435 TSER=912309754
All the above three events indicate the success information.
387949 2012-07-05 07:45:30.782652 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [PSH, ACK] Seq=1 Ack=1 Win=5888 Len=320 TSV=1414450436 TSER=912309754
387967 2012-07-05 07:45:30.915615 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [ACK] Seq=1 Ack=321 Win=6912 Len=0 TSV=912309788 TSER=1414450436
388011 2012-07-05 07:45:31.362712 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [PSH, ACK] Seq=321 Ack=1 Win=5888 Len=320 TSV=1414450581 TSER=912309788
388055 2012-07-05 07:45:31.495558 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [ACK] Seq=1 Ack=641 Win=7936 Len=0 TSV=912309933 TSER=1414450581
388080 2012-07-05 07:45:31.702336 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [PSH, ACK] Seq=1 Ack=641 Win=7936 Len=712 TSV=912309985 TSER=1414450581
388081 2012-07-05 07:45:31.702350 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [ACK] Seq=641 Ack=713 Win=7424 Len=0 TSV=1414450666 TSER=912309985
388142 2012-07-05 07:45:32.185612 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [PSH, ACK] Seq=713 Ack=641 Win=7936 Len=320 TSV=912310106 TSER=1414450666
388143 2012-07-05 07:45:32.185629 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [ACK] Seq=641 Ack=1033 Win=8704 Len=0 TSV=1414450786 TSER=912310106
388169 2012-07-05 07:45:32.362622 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [PSH, ACK] Seq=641 Ack=1033 Win=8704 Len=320 TSV=1414450831 TSER=912310106
388212 2012-07-05 07:45:32.494833 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [ACK] Seq=1033 Ack=961 Win=9216 Len=0 TSV=912310183 TSER=1414450831
388219 2012-07-05 07:45:32.501613 10.137.165.136 10.18.92.173 TCP 8441 > 33728 [PSH, ACK] Seq=1033 Ack=961 Win=9216 Len=356 TSV=912310185 TSER=1414450831
388220 2012-07-05 07:45:32.501624 10.18.92.173 10.137.165.136 TCP 33728 > 8441 [ACK] Seq=961 Ack=1389 Win=10240 Len=0 TSV=1414450865 TSER=912310185
Application logs reporting the error on connect (i.e. connect + select):
[5258: 2012-07-05 07:45:30]Connect [10.137.165.136 <- 10.18.92.173] success.
[5258: 2012-07-05 07:45:32]Connect 10.137.165.137 fail after select, cause:115, error Operation now in progress. Check whether remote machine exist and the network is normal or not.
[5258: 2012-07-05 07:45:32]Connect to server([10.137.165.137 <- 10.18.92.173], port=8441) Failed!
The success log corresponds to the first three tcpdump entries; for the failure log there are no events at all in the tcpdump.
My question is: when the client calls connect in the failed case, why do I not see any event in the client-side tcpdump (not even the initial SYN)? What can be the reason for this randomness?
You have hit EINPROGRESS. From the connect man page:
The socket is nonblocking and the connection cannot be completed immediately. It is possible to select(2) or poll(2) for completion by selecting the socket for writing. After select(2) indicates writability, use getsockopt(2) to read the SO_ERROR option at level SOL_SOCKET to determine whether connect() completed successfully (SO_ERROR is zero) or unsuccessfully (SO_ERROR is one of the usual error codes listed here, explaining the reason for the failure).
This is saying that EINPROGRESS is an indicator that the kernel is not able to complete the connection now, even though there are available local ports and routing cache entries. It seems this occurs when the socket state has not transitioned to "ESTABLISHED" yet. Just wait on the socket in select again, but call getsockopt afterwards to see if your connect had completed.
As to why, the socket transitions to SYN_SENT state during connect, but the packet may still be in the output queue and has not actually made it to the network device buffer yet.
After select() returns, you are not actually fetching the current status of the socket - you are seeing a stale value in errno (left over from the connect() call). Most likely your select() is simply returning after the timeout.
You need to call getsockopt(sktConnect, SOL_SOCKET, SO_ERROR, &err, ...) to get the actual status of the socket after select() returns.
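A minimal Rust sketch of the same getsockopt(SO_ERROR) check, assuming the libc crate (the function name is illustrative):

use std::{io, mem, os::unix::io::RawFd};

// After select()/poll() reports the socket writable, read SO_ERROR to get
// the real result of the non-blocking connect(). SO_ERROR also clears the
// pending error on the socket.
fn take_connect_result(fd: RawFd) -> io::Result<()> {
    let mut err: libc::c_int = 0;
    let mut len = mem::size_of::<libc::c_int>() as libc::socklen_t;
    let rc = unsafe {
        libc::getsockopt(
            fd,
            libc::SOL_SOCKET,
            libc::SO_ERROR,
            &mut err as *mut _ as *mut libc::c_void,
            &mut len,
        )
    };
    if rc != 0 {
        return Err(io::Error::last_os_error());
    }
    if err != 0 {
        // connect() actually failed; err holds the usual errno value.
        return Err(io::Error::from_raw_os_error(err));
    }
    Ok(()) // connect() completed successfully
}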
