@InProceedings{BjorndalenSampson08,
  title     = "{P}rocess-{O}riented {C}ollective {O}perations",
  author    = "Bjørndalen, John Markus and Sampson, Adam T.",
  editor    = "Welch, Peter H. and Stepney, S. and Polack, F. A. C. and Barnes, Frederick R. M. and McEwan, Alistair A. and Stiles, G. S. and Broenink, Jan F. and Sampson, Adam T.",
  booktitle = "{C}ommunicating {P}rocess {A}rchitectures 2008",
  pages     = "309--328",
  isbn      = "978-1-58603-907-3",
  year      = "2008",
  month     = sep,
  abstract  = "Distributing process-oriented programs across a cluster of machines requires careful attention to the effects of network latency. The MPI standard, widely used for cluster computation, defines a number of collective operations: efficient, reusable algorithms for performing operations among a group of machines in the cluster. In this paper, we describe our techniques for implementing MPI communication patterns in process-oriented languages, and how we have used them to implement collective operations in PyCSP and occam-$\pi$ on top of an asynchronous messaging framework. We show how to make use of collective operations in distributed process-oriented applications. We also show how the process-oriented model can be used to increase concurrency in existing collective operation algorithms."
}