Testing captured IO from a spawned process

I want to test the return value and the IO output of the following function:
defmodule Speaker do
  def speak do
    receive do
      { :say, msg } ->
        IO.puts(msg)
        speak
      _other ->
        speak # throw away the message
    end
  end
end
In the ExUnit.CaptureIO docs, there is an example test that does this:
test "checking the return value and the IO output" do
fun = fn ->
assert Enum.each(["some", "example"], &(IO.puts &1)) == :ok
end
assert capture_io(fun) == "some\nexample\n"
end
Given that, I thought I could write the following test that performs a similar action but with a spawned process:
test ".speak with capture io" do
pid = Kernel.spawn(Speaker, :speak, [])
fun = fn ->
assert send(pid, { :say, "Hello" }) == { :say, "Hello" }
end
assert capture_io(fun) == "Hello\n"
end
However, I get the following error message telling me there was no output, even though I can see output on the terminal:
1) test .speak with capture io (SpeakerTest)
   test/speaker_test.exs:25
   Assertion with == failed
   code: capture_io(fun) == "Hello\n"
   lhs: ""
   rhs: "Hello\n"
   stacktrace:
     test/speaker_test.exs:30: (test)
So, am I missing something, perhaps with regard to testing spawned processes or functions that use the receive macro? How can I change my test to make it pass?

CaptureIO might not be suited for what you're trying to do here. It runs a function and returns the captured output once that function returns. But your function never returns, so it seems this won't work. I came up with the following workaround:
test ".speak with capture io" do
test_process = self()
pid = spawn(fn ->
Process.group_leader(self(), test_process)
Speaker.speak
end)
send(pid, {:say, "Hello"})
assert_receive {:io_request, _, _, {:put_chars, :unicode, "Hello\n"}}
# Just to cleanup pid which dies upon not receiving a correct response
# to the :io_request after a timeout
Process.exit(pid, :kill)
end
It uses Process.group_leader to set the current process as the receiver of IO messages for the tested process and then asserts that these messages arrive.

I had a similar problem: a registered process in my Application would time out every 10 seconds and write to stdout with IO.binwrite. To simulate multiple timeouts I built on Pawel Obrok's answer, but changed it to reply to the :io_request with an :io_reply, so that the process would not hang and I could send multiple messages.
defp assert_io() do
  send(MyProcess, :timeout)

  receive do
    {:io_request, from, reply_as, {:put_chars, _, msg}} ->
      assert msg == "Some IO message"
      send(from, {:io_reply, reply_as, :ok})
    _ ->
      flunk()
  end
end

test "get multiple messages" do
  Process.group_leader(Process.whereis(MyProcess), self())
  assert_io()
  assert_io()
end
If you want to know more about the I/O protocol, take a look at the Erlang docs about it.
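To make the message flow concrete, here is a minimal sketch of my own (not from the answers above; the FakeLeader name is made up) of a process that speaks just enough of that protocol to collect :put_chars requests and acknowledge them:

defmodule FakeLeader do
  # Collects everything written to this process via the Erlang I/O protocol.
  def collect(acc \\ []) do
    receive do
      {:io_request, from, reply_as, {:put_chars, _encoding, chars}} ->
        # Acknowledge the request so the writer doesn't hang
        send(from, {:io_reply, reply_as, :ok})
        collect([IO.chardata_to_string(chars) | acc])
      {:dump, caller} ->
        send(caller, {:io_data, acc |> Enum.reverse() |> Enum.join()})
    end
  end
end

Spawn it, install it with Process.group_leader(pid_under_test, fake_leader_pid), and send it {:dump, self()} afterwards to read back everything the tested process printed.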

Related

What does error: sending to an uninitialized chan mean in ispin?

ispin is generating this message in the progress window (the middle-bottom pane of the simulate tab):
Error: sending to an uninitialized chan
The weird thing is that the error message starts to appear in the middle of the simulation (I set the maximum step number to 10000 and it starts to appear at around 6000 steps).
How can this be? Does Spin somehow lose the chan initialization in the middle of the simulation?
This is the initialization of one of the channels I use:
chan VP = [1] of {byte};
and this is the error message during the simulation:
This is an MCVE (minimal, complete, verifiable example) for the error you are experiencing:
chan c;

init {
    c!10;
}
which yields
~$ spin test.pml
Error: sending to an uninitialized chan
timeout
Error: sending to an uninitialized chan
#processes: 1
0: proc 0 (:init::1) test.pml:4 (state 1)
1 process created
It is possible that you forgot to state whether the channel is synchronous or asynchronous, and what kind of messages it should contain. A proper channel declaration should look like this:
chan c = [N] of { type_1, ..., type_M };
where N is greater than or equal to 1 for any asynchronous channel and 0 otherwise, and type_1, ..., type_M is the list of types (e.g. int, bool) of the fields contained in one message.
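Applied to the MCVE above, giving the channel a full declaration (here an asynchronous channel of capacity 1 carrying a single byte field) makes the error go away:

chan c = [1] of { byte };

init {
    c!10;
}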
For more details, read the documentation.

SyncVar transfer producer/consumer threads in Scala

Note: the problem I am solving has a purely educational purpose; I know that the abstraction I want to create is error prone, and so on... I don't need a fast solution, I need an explanation.
In the book I am reading there is an exercise that says I need to implement a SyncVar with the following interface:
class SyncVar[T] {
  def get(): T = ???
  def put(x: T): Unit = ???
}
My comment: alright, seems understandable; I need some sync variable that I can put to and get from.
A SyncVar object is used to exchange values between two or more threads.
When created, the SyncVar object is empty:
° Calling get throws an exception
° Calling put adds a value to the SyncVar object
After a value is added to a SyncVar object, we can say that it is non-empty:
° Calling get returns the current value, and changes the state to empty
° Calling put throws an exception
My thoughts: this is a variable that throws an exception when get is called while it is empty, or when put is called while it already holds a value; calling get clears the previous value. Seems like I need to use Option.
So I provide the following implementation:
class SyncVar[T] {
  var value: Option[T] = None

  def get(): T = value match {
    case Some(t) => this.synchronized {
      value = None
      t
    }
    case None => throw new IllegalArgumentException("error get")
  }

  def put(x: T): Unit = this.synchronized {
    value match {
      case Some(t) => throw new IllegalArgumentException("error put")
      case None => value = Some(x)
    }
  }

  def isEmpty = value.isEmpty
  def nonEmpty = value.nonEmpty
}
My comment:
put and get are synchronized, and there are also isEmpty and nonEmpty.
The next task makes me confused:
The SyncVar object from the previous exercise can be cumbersome to use,
due to exceptions when the SyncVar object is in an invalid state. Implement
a pair of methods isEmpty and nonEmpty on the SyncVar object. Then,
implement a producer thread that transfers a range of numbers 0 until 15
to the consumer thread that prints them.
As I understand it, I need two threads:
// producer thread that produces the numbers in 0 until 15
val producerThread = thread {
  for (i <- 0 until 15) {
    println(s"$i")
    if (syncVar.isEmpty) {
      println(s"put $i")
      syncVar.put(i)
    }
  }
}

// consumer that prints the values it gets
val consumerThread = thread {
  while (true) {
    if (syncVar.nonEmpty) println(s"get ${syncVar.get()}")
  }
}
Question:
But this code suffers from nondeterminism, so it produces a different result each time, while I need it to print the numbers 0 until 15 in the right order. Could you explain to me what is wrong with my solution?
First, your synchronized in get is too narrow. It should surround the entire method, like in put (can you think why?).
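For illustration only (my sketch, not part of the original answer), widening the critical section so that the check and the reset happen as one atomic step would look like this:

def get(): T = this.synchronized {
  value match {
    case Some(t) =>
      value = None // clear the slot while still holding the lock
      t
    case None => throw new IllegalArgumentException("error get")
  }
}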
After fixing, consider this scenario:
producerThread puts 0 into syncVar.
producerThread continues to run and tries to put 1. syncVar.isEmpty returns false, so it doesn't put 1. It continues the loop with the next i instead.
consumerThread gets 0.
producerThread puts 2.
Etc. So consumerThread can never get and print 1, because producerThread never puts it there.
Think what producerThread should do if syncVar is not empty and what consumerThread should do if it is.
Thanks to Alexey Romanov, I finally implemented the transfer method:
Explanation:
The idea is that the producer thread checks whether syncVar is empty; if it is, it puts the value, otherwise it waits with while (syncVar.nonEmpty) {} (busy waiting, which is bad practice, but it is important to know about for educational purposes). When we leave the loop (stop busy waiting) we put the value and move on to the next i. Meanwhile the consumer thread busy-waits forever and reads the variable whenever it is nonEmpty.
Solution:
def transfer() = {
  val syncVar = new SyncVar[Int]

  val producerThread = thread {
    log("producer thread started")
    for (i <- 0 until 15) {
      if (syncVar.isEmpty) {
        syncVar.put(i)
      } else {
        while (syncVar.nonEmpty) {
          log("busy waiting")
        }
        if (syncVar.isEmpty) {
          syncVar.put(i)
        }
      }
    }
  }

  val consumerThread = thread {
    log("consumer thread started")
    while (true) {
      if (syncVar.nonEmpty) {
        syncVar.get()
      }
    }
  }
}
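As a follow-up to the busy-waiting caveat above, here is a sketch of my own (not from the thread) of a variant whose put and get block on the object's monitor instead of spinning, using wait/notifyAll:

class BlockingSyncVar[T] {
  private var value: Option[T] = None

  def put(x: T): Unit = this.synchronized {
    while (value.nonEmpty) wait() // block until the slot is empty
    value = Some(x)
    notifyAll()                   // wake up a waiting consumer
  }

  def get(): T = this.synchronized {
    while (value.isEmpty) wait()  // block until a value arrives
    val t = value.get
    value = None
    notifyAll()                   // wake up a waiting producer
    t
  }
}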

Crystal convert the idea behind Thread pool to Fibers/spawn

I'm having a hard time learning the idea behind fibers/coroutines and their implementation in Crystal.
I hope this is the right place to ask this, I'll totally accept a "not here" answer :)
This is my usual way of handling multi-threading in Ruby:
threads = []
max_threads = 10

loop do
  begin
    threads << Thread.new do
      helper_method(1, 2, 3, 4)
    end
  rescue Exception => e
    puts "Error Starting thread"
  end

  begin
    threads = threads.select { |t| t.alive? ? true : (t.join; false) }
    while threads.size >= max_threads
      puts 'Got Maximum threads'
      sleep 1
      threads = threads.select { |t| t.alive? ? true : (t.join; false) }
    end
  rescue Exception => e
    puts e
  end
end
This way I open a new Thread, usually for an incoming connection or some other task, add the Thread to a threads array, and then check that I don't have more threads than I wanted.
What would be a good way to implement something similar in Crystal using spawn/channels/fibers, etc.?
Something like this:
require "socket"
ch = Channel(TCPSocket).new
10.times do
spawn do
loop do
socket = ch.receive
socket.puts "Hi!"
socket.close
end
end
end
server = TCPServer.new(1234)
loop do
socket = server.accept
ch.send socket
end
This code pre-spawns 10 fibers to handle the requests. The channel is unbuffered, so the connections won't queue up if no fiber can handle them.
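If you would rather let accepted connections queue up while all fibers are busy, a buffered channel works as well (the capacity of 32 below is an arbitrary choice of mine):

ch = Channel(TCPSocket).new(32) # up to 32 accepted sockets can wait for a free fiber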
You can't replicate the way it works for threads. spawn doesn't return a coroutine object, and there is no way to join coroutines.
Yet we can open a channel to communicate between the coroutines and the pool manager. This manager may run within its own coroutine or be the main coroutine, which will prevent the process from exiting.
Here is a working example, with a worker(&block) method that spawns a coroutine and opens a channel to report its status (whether it failed or terminated), and a pool(&block) method that keeps a pool of such workers, reads from the result channels to know the state of the coroutines, and keeps spawning new ones.
def worker(&block)
  result = UnbufferedChannel(Exception?).new

  ::spawn do
    begin
      block.call
    rescue ex
      result.send(ex)
    else
      result.send(nil)
    end
  end

  result
end

def pool(size, &block)
  counter = 0
  results = [] of UnbufferedChannel(Exception?)

  loop do
    while counter < size
      counter += 1
      puts "spawning worker"
      results << worker(&block)
    end

    result = Channel.select(results)
    counter -= 1
    results.delete(result)

    if ex = result.receive
      puts "ERROR: #{ex.message}"
    else
      puts "worker terminated"
    end
  end
end
pool(5) do
loop { helper_method(1, 2, 3, 4) }
end

How can I use channel send direction in Go

In Go, you can specify in which direction a channel may be used. I am trying to create an example of this; look at the following code:
package main

import (
    "fmt"
    "time"
)

func main() {
    ic_send_only := make(<-chan int) // a channel that can only send data - arrow going out is sending
    ic_recv_only := make(chan<- int) // a channel that can only receive a data - arrow going in is receiving

    go func() {
        ic_recv_only <- 4555
    }()

    go func() {
        ic_send_only <- ic_recv_only
    }()

    fmt.Println(ic_recv_only)
    time.Sleep(10000)
}
I get the compiler error
# command-line-arguments
.\send_receive.go:19: invalid operation: ic_send_only <- ic_recv_only (send to receive-only type <-chan int)
[Finished in 0.2s with exit code 2]
How can I use channel direction in the right way?
Or does anyone have a better sample than me?
Three issues:
You have the send and receive operations reversed (which is the error you're seeing)
Creating recv-only or send-only channels makes no sense, as you cannot use them
The notation you're using is trying to send the channel itself, not the result. You need to receive and send, which requires two arrows.
ic_recv_only <- <-ic_send_only
You may be confused because you have the terminology reversed. <-ch is a "receive operation", and ch <- is a send operation. Note that in your example, everything would be deadlocked because you can't complete the corresponding sends and receives to pass something through either channel.
Here is a complete example:
package main

import "fmt"

// This receives an int from a channel. The channel is receive-only.
func consumer(ch <-chan int) int {
    return <-ch
}

// This sends an int over a channel. The channel is send-only.
func producer(i int, ch chan<- int) {
    ch <- i
}

func main() {
    ch := make(chan int)
    go producer(42, ch)
    result := consumer(ch)
    fmt.Println("received", result)
}
To summarise, the key points that JimB has made are:
You create channels using make
Every channel has two ends.
You communicate via the ends of channels via <-. The ends are important.
There is a sending end and a receiving end; channels are unidirectional.
Note also that each end can be safely accessed concurrently by more than one goroutine.
Also note that JimB's example producer(i int, ch chan<- int) and consumer(ch <-chan int) functions have parameters that specify which end they use via <-chan and chan<-, instead of just chan. Although this is optional, it is good practice because the compiler will help you fix silly mistakes if you do this.
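As a side note (my own sketch, not from the answers above), the directional types are just restricted views of an ordinary channel: a bidirectional channel converts implicitly to either directional type, but never back:

package main

func main() {
    ch := make(chan int)         // bidirectional channel created with make
    var sendOnly chan<- int = ch // OK: restrict to the sending end
    var recvOnly <-chan int = ch // OK: restrict to the receiving end

    go func() { sendOnly <- 1 }() // send through the send-only view
    println(<-recvOnly)           // receive through the receive-only view

    // var back chan int = sendOnly // would not compile: the restriction is one-way
}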

What are Lua coroutines even for? Why doesn't this code work as I expect it?

I'm having trouble understanding this code... I was expecting something similar to threading, where I would get output with random "nooo"s and "yaaaay"s interspersed as they both print asynchronously. Instead, I discovered that the main thread seems to block on the first call to coroutine.resume(), which prevents the second coroutine from starting until the first has yielded.
If this is the intended behaviour of coroutines, what are they useful for, and how would I achieve the goal I was hoping for? Would I have to implement my own scheduler for these coroutines to operate asynchronously? That seems messy, and I may as well just use functions!
co1 = coroutine.create(function ()
  local i = 1
  while i < 200 do
    print("nooo")
    i = i + 1
  end
  coroutine.yield()
end)

co2 = coroutine.create(function ()
  local i = 1
  while i < 200 do
    print("yaaaay")
    i = i + 1
  end
  coroutine.yield()
end)

coroutine.resume(co1)
coroutine.resume(co2)
Coroutines aren't threads.
Coroutines are like threads that are never actively scheduled. So yes, you are kinda correct that you would have to write your own scheduler to have both coroutines run simultaneously.
However, you are missing the bigger picture when it comes to coroutines. Check out Wikipedia's list of coroutine uses. Here is one concrete example that might guide you in the right direction.
-- level script
-- a volcano erupts every 2 minutes
function level_with_volcano( interface )
  while true do
    wait(seconds(5))
    start_eruption_volcano()
    wait(frames(10))
    s = play("rumble_sound")
    wait( end_of(s) )
    start_camera_shake()
    -- more stuff
    wait(minutes(2))
  end
end
The above script could be written to run iteratively with a switch statement and some clever state variables, but it is much clearer when written as a coroutine. The script could also be a thread, but do you really need to dedicate a kernel thread to this simple code? A busy game level could have hundreds of these coroutines running without impacting performance, whereas if each of them were a thread you might get away with 20-30 before performance started to suffer.
A coroutine is meant to allow me to write code that stores state on the stack so that I can stop running it for a while (the wait functions) and start it again where I left off.
Since there have been a number of comments asking how to implement the wait function that would make deft_code's example work, I've decided to write a possible implementation. The general idea is that we have a scheduler with a list of coroutines, and the scheduler decides when to return control to the coroutines after they give up control with their wait calls. This is desirable because it makes asynchronous code readable and easy to reason about.
This is only one possible use of coroutines, they are a more general abstraction tool that can be used for many different purposes (such as writing iterators and generators, writing stateful stream processing objects (for example, multiple stages in a parser), implementing exceptions and continuations, etc.).
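As a quick aside (my own illustration, not part of the original answer), the iterator/generator use mentioned above can be as small as this:

-- a generator producing the squares 1, 4, 9, ... using coroutine.wrap
local function squares(n)
  return coroutine.wrap(function ()
    for i = 1, n do
      coroutine.yield(i * i)
    end
  end)
end

for s in squares(5) do
  print(s)  -- prints 1, 4, 9, 16, 25
end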
First: the scheduler definition:
local function make_scheduler()
  local script_container = {}
  return {
    continue_script = function(frame, script_thread)
      if script_container[frame] == nil then
        script_container[frame] = {}
      end
      table.insert(script_container[frame], script_thread)
    end,
    run = function(frame_number, game_control)
      if script_container[frame_number] ~= nil then
        local i = 1
        -- recheck length every time, to allow a coroutine to resume on
        -- the same frame
        local scripts = script_container[frame_number]
        while i <= #scripts do
          local success, msg =
            coroutine.resume(scripts[i], game_control)
          if not success then error(msg) end
          i = i + 1
        end
      end
    end
  }
end
Now, initialising the world:
local fps = 60
local frame_number = 1
local scheduler = make_scheduler()
scheduler.continue_script(frame_number, coroutine.create(function(game_control)
while true do
--instead of passing game_control as a parameter, we could
--have equivalently put these values in _ENV.
game_control.wait(game_control.seconds(5))
game_control.start_eruption_volcano()
game_control.wait(game_control.frames(10))
s = game_control.play("rumble_sound")
game_control.wait( game_control.end_of(s) )
game_control.start_camera_shake()
-- more stuff
game_control.wait(game_control.minutes(2))
end
end))
The (dummy) interface to the game:
local game_control = {
  seconds = function(num)
    return math.floor(num*fps)
  end,
  minutes = function(num)
    return math.floor(num*fps*60)
  end,
  frames = function(num) return num end,
  end_of = function(sound)
    return sound.start+sound.duration-frame_number
  end,
  wait = function(frames_to_wait_for)
    scheduler.continue_script(
      frame_number+math.floor(frames_to_wait_for),
      coroutine.running())
    coroutine.yield()
  end,
  start_eruption_volcano = function()
    -- obviously in a real game, this could
    -- affect some datastructure in a non-immediate way
    print(frame_number..": The volcano is erupting, BOOM!")
  end,
  start_camera_shake = function()
    print(frame_number..": SHAKY!")
  end,
  play = function(soundname)
    print(frame_number..": Playing: "..soundname)
    return {name = soundname, start = frame_number, duration = 30}
  end
}
And the game loop:
while true do
  scheduler.run(frame_number, game_control)
  frame_number = frame_number + 1
end
Here is a more direct illustration of the interleaving: each coroutine yields on every iteration, and a plain loop resumes each of them in turn.

co1 = coroutine.create(
  function()
    for i = 1, 100 do
      print("co1_"..i)
      coroutine.yield(co2)
    end
  end
)

co2 = coroutine.create(
  function()
    for i = 1, 100 do
      print("co2_"..i)
      coroutine.yield(co1)
    end
  end
)

for i = 1, 100 do
  coroutine.resume(co1)
  coroutine.resume(co2)
end
