@comment{ The GNUnet Bibliography | BibTeX records }
@comment{ By topic | By date | By author }
@article{10.1109/ICPP.2003.1240580,
  % Brace-protect acronyms so sentence-casing styles keep their capitalisation.
  title       = {{HIERAS}: A {DHT} Based Hierarchical {P2P} Routing Algorithm},
  author      = {Zhiyong Xu and Rui Min and Yiming Hu},
  journal     = {Parallel Processing, International Conference on},
  year        = {2003},
  address     = {Los Alamitos, CA, USA},
  pages       = {0--187},
  publisher   = {IEEE Computer Society},
  abstract    = {Routing algorithm has great influence on system overall performance in Peer-to-Peer (P2P) applications. In current DHT based routing algorithms, routing tasks are distributed across all system peers. However, a routing hop could happen between two widely separated peers with high network link latency which greatly increases system routing overheads. In this paper, we propose a new P2P routing algorithm--- HIERAS to relieve this problem, it keeps scalability property of current DHT algorithms and improves system routing performance by the introduction of hierarchical structure. In HIERAS, we create several lower level P2P rings besides the highest level P2P ring. A P2P ring is a subset of the overall P2P overlay network. We create P2P rings in such a strategy that the average link latency between two peers in lower level rings is much smaller than higher level rings. Routing tasks are first executed in lower level rings before they go up to higher level rings, a large portion of routing hops previously executed in the global P2P ring are now replaced by hops in lower level rings, thus routing overheads can be reduced. The simulation results show HIERAS routing algorithm can significantly improve P2P system routing performance},
  www_section = {distributed hash table, P2P},
  issn        = {0190-3918},
  doi         = {10.1109/ICPP.2003.1240580},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/ICPP.2003.1240580},
}
@article{10.1109/MASCOT.2005.73,
  title       = {The Feasibility of {DHT}-based Streaming Multicast},
  author      = {Stefan Birrer and Fabian E. Bustamante},
  journal     = {2012 IEEE 20th International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems},
  year        = {2005},
  address     = {Los Alamitos, CA, USA},
  pages       = {288--298},
  publisher   = {IEEE Computer Society},
  issn        = {1526-7539},
  % Store the bare DOI; styles and resolvers add the prefix.
  doi         = {10.1109/MASCOT.2005.73},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SBirrer-dhtBasedMulticast_0.pdf},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned; journal title ("2012 ... 20th") conflicts with year 2005 -- verify venue (likely MASCOTS 2005)},
}
@article{10.1109/MDSO.2005.31,
  title       = {Free Riding on {Gnutella} Revisited: The Bell Tolls?},
  author      = {Daniel Hughes and Geoff Coulson and James Walkerdine},
  journal     = {IEEE Distributed Systems Online},
  volume      = {6},
  year        = {2005},
  month       = jun,
  address     = {Los Alamitos, CA, USA},
  chapter     = {1},
  publisher   = {IEEE Computer Society},
  abstract    = {Individuals who use peer-to-peer (P2P) file-sharing networks such as Gnutella face a social dilemma. They must decide whether to contribute to the common good by sharing files or to maximize their personal experience by free riding, downloading files while not contributing any to the network. Individuals gain no personal benefits from uploading files (in fact, it's inconvenient), so it's "rational" for users to free ride. However, significant numbers of free riders degrade the entire system's utility, creating a "tragedy of the digital commons." In this article, a new analysis of free riding on the Gnutella network updates data from 2000 and points to an increasing downgrade in the network's overall performance and the emergence of a "metatragedy" of the commons among Gnutella developers},
  www_section = {distributed systems, free riding, Gnutella, peer-to-peer networking},
  issn        = {1541-4922},
  doi         = {10.1109/MDSO.2005.31},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20DSO\%20-\%20Free\%20riding\%20on\%20Gnutella\%20revisited.pdf},
}
@article{10.1109/MOBIQUITOUS.2005.29,
  % "ef.cient" in the source is a lost "ffi" ligature from PDF extraction.
  title       = {Exploiting co-location history for efficient service selection in ubiquitous computing systems},
  author      = {Alexandros Karypidis and Spyros Lalis},
  journal     = {Mobile and Ubiquitous Systems, Annual International Conference on},
  year        = {2005},
  address     = {Los Alamitos, CA, USA},
  pages       = {202--212},
  publisher   = {IEEE Computer Society},
  abstract    = {As the ubiquitous computing vision materializes, the number and diversity of digital elements in our environment increases. Computing capability comes in various forms and is embedded in different physical objects, ranging from miniature devices such as human implants and tiny sensor particles, to large constructions such as vehicles and entire buildings. The number of possible interactions among such elements, some of which may be invisible or offer similar functionality, is growing fast so that it becomes increasingly hard to combine or select between them. Mechanisms are thus required for intelligent matchmaking that will achieve controlled system behavior, yet without requiring the user to continuously input desirable options in an explicit manner. In this paper we argue that information about the colocation relationship of computing elements is quite valuable in this respect and can be exploited to guide automated service selection with minimal or no user involvement. We also discuss the implementation of such mechanism that is part of our runtime system for smart objects},
  isbn        = {0-7695-2375-7},
  doi         = {10.1109/MOBIQUITOUS.2005.29},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/MOBIQUITOUS.2005.29},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned},
}
@article{10.1109/P2P.2001.990421,
  title       = {Search in {JXTA} and Other Distributed Networks},
  author      = {Sherif Botros and Steve Waterhouse},
  journal     = {Peer-to-Peer Computing, IEEE International Conference on},
  year        = {2001},
  address     = {Los Alamitos, CA, USA},
  pages       = {0--0030},
  publisher   = {IEEE Computer Society},
  isbn        = {0-7695-1503-7},
  doi         = {10.1109/P2P.2001.990421},
  url         = {https://bibliography.gnunet.org},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned},
}
@article{10.1109/PERSER.2005.1506410,
  title       = {Service discovery using volunteer nodes for pervasive environments},
  author      = {Mijeom Kim and Mohan Kumar and Behrooz Shirazi},
  journal     = {International Conference on Pervasive Services},
  year        = {2005},
  address     = {Los Alamitos, CA, USA},
  pages       = {188--197},
  publisher   = {IEEE Computer Society},
  abstract    = {We propose a service discovery architecture called VSD (service discovery based on volunteers) for heterogeneous and dynamic pervasive computing environments. The proposed architecture uses a small subset of the nodes called volunteers that perform directory services. Relatively stable and capable nodes serve as volunteers, thus recognizing node heterogeneity in terms of mobility and capability. We discuss characteristics of VSD architecture and methods to improve connectivity among volunteers for higher discovery rate. By showing that VSD performs quite well compared to a broadcast based scheme in MANET scenarios, we validate that VSD is a flexible and adaptable architecture appropriate for dynamic pervasive computing environments. VSD incorporates several novel features: i) handles dynamism and supports self-reconfiguration; ii) provides physical locality and scalability; and iii) improves reliability and copes with uncertainty through redundancy by forming overlapped clusters},
  isbn        = {0-7803-9032-6},
  doi         = {10.1109/PERSER.2005.1506410},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/PERSER.2005.1506410},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/31.pdf},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{10.1109/PTP.2003.1231513,
  title       = {Identity Crisis: Anonymity vs. Reputation in {P2P} Systems},
  author      = {Marti, Sergio and Hector Garcia-Molina},
  % "Proceecings" typo in the source corrected.
  booktitle   = {P2P'03. Proceedings of the 3rd International Conference on Peer-to-Peer Computing},
  organization = {IEEE Computer Society},
  year        = {2003},
  month       = sep,
  address     = {Link{\"o}ping, Sweden},
  pages       = {0--134},
  publisher   = {IEEE Computer Society},
  abstract    = {The effectiveness of reputation systems for peer-to-peer resource-sharing networks is largely dependent on the reliability of the identities used by peers in the network. Much debate has centered around how closely one's pseudoidentity in the network should be tied to their real-world identity, and how that identity is protected from malicious spoofing. In this paper we investigate the cost in efficiency of two solutions to the identity problem for peer-to-peer reputation systems. Our results show that, using some simple mechanisms, reputation systems can provide a factor of 4 to 20 improvement in performance over no reputation system, depending on the identity model used},
  www_section = {anonymity, identity, identity model, P2P, peer-to-peer networking, reliability, reputation, reputation system},
  isbn        = {0-7695-2023-5},
  doi         = {10.1109/PTP.2003.1231513},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Identity\%20crisis\%3A\%20anonymity\%20vs\%20reputation.pdf},
}
@article{10.1109/SFCS.2002.1181950,
  % Brace-protect the acronym so sentence-casing styles do not produce "Lt codes".
  title       = {{LT} Codes},
  author      = {Luby, Michael},
  journal     = {Foundations of Computer Science, Annual IEEE Symposium on},
  year        = {2002},
  address     = {Los Alamitos, CA, USA},
  pages       = {0--271},
  publisher   = {IEEE Computer Society},
  abstract    = {We introduce LT codes, the first rateless erasure codes that are very efficient as the data length grows},
  www_section = {coding theory},
  isbn        = {0-7695-1822-2},
  issn        = {0272-5428},
  doi         = {10.1109/SFCS.2002.1181950},
  url         = {http://www.computer.org/portal/web/csdl/abs/proceedings/focs/2002/1822/00/18220271abs.htm},
}
@article{10.1109/SP.1980.10006,
  title       = {Protocols for Public Key Cryptosystems},
  author      = {Ralph C. Merkle},
  journal     = {Security and Privacy, IEEE Symposium on},
  year        = {1980},
  address     = {Los Alamitos, CA, USA},
  pages       = {0--122},
  publisher   = {IEEE Computer Society},
  abstract    = {New Cryptographic protocols which take full advantage of the unique properties of public key cryptosystems are now evolving. Several protocols for public key distribution and for digital signatures are briefly compared with each other and with the conventional alternative},
  issn        = {1540-7993},
  doi         = {10.1109/SP.1980.10006},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/SP.1980.10006},
  www_section = {Unsorted},
}
@article{10.1109/WOWMOM.2007.4351805,
  title       = {A Game Theoretic Model of a Protocol for Data Possession Verification},
  author      = {Nouha Oualha and Pietro Michiardi and Yves Roudier},
  journal     = {A World of Wireless, Mobile and Multimedia Networks, International Symposium on},
  year        = {2007},
  address     = {Los Alamitos, CA, USA},
  pages       = {1--6},
  publisher   = {IEEE Computer Society},
  abstract    = {This paper discusses how to model a protocol for the verification of data possession intended to secure a peer-to-peer storage application. The verification protocol is a primitive for storage assessment, and indirectly motivates nodes to behave cooperatively within the application. The capability of the protocol to enforce cooperation between a data holder and a data owner is proved theoretically by modeling the verification protocol as a Bayesian game, and demonstrating that the solution of the game is an equilibrium where both parties are cooperative},
  www_section = {P2P},
  isbn        = {978-1-4244-0992-1},
  doi         = {10.1109/WOWMOM.2007.4351805},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/WOWMOM.2007.4351805},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oualno-070618.pdf},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1007919,
  title       = {Simple efficient load balancing algorithms for peer-to-peer systems},
  author      = {David Karger and Ruhl, Matthias},
  booktitle   = {SPAA '04: Proceedings of the sixteenth annual ACM symposium on Parallelism in algorithms and architectures},
  organization = {ACM},
  year        = {2004},
  address     = {New York, NY, USA},
  pages       = {36--43},
  publisher   = {ACM},
  abstract    = {Load balancing is a critical issue for the efficient operation of peer-to-peer networks. We give two new load-balancing protocols whose provable performance guarantees are within a constant factor of optimal. Our protocols refine the consistent hashing data structure that underlies the Chord (and Koorde) P2P network. Both preserve Chord's logarithmic query time and near-optimal data migration cost.Consistent hashing is an instance of the distributed hash table (DHT) paradigm for assigning items to nodes in a peer-to-peer system: items and nodes are mapped to a common address space, and nodes have to store all items residing closeby in the address space.Our first protocol balances the distribution of the key address space to nodes, which yields a load-balanced system when the DHT maps items "randomly" into the address space. To our knowledge, this yields the first P2P scheme simultaneously achieving O(log n) degree, O(log n) look-up cost, and constant-factor load balance (previous schemes settled for any two of the three).Our second protocol aims to directly balance the distribution of items among the nodes. This is useful when the distribution of items in the address space cannot be randomized. We give a simple protocol that balances load by moving nodes to arbitrary locations "where they are needed." As an application, we use the last protocol to give an optimal implementation of a distributed data structure for range searches on ordered data},
  www_section = {load balancing, P2P},
  isbn        = {1-58113-840-7},
  doi         = {10.1145/1007912.1007919},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1007919\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2405.pdf},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1013317,
  title       = {Energy-aware demand paging on {NAND} flash-based embedded storages},
  author      = {Chanik Park and Kang, Jeong-Uk and Park, Seon-Yeong and Kim, Jin-Soo},
  booktitle   = {ISLPED '04: Proceedings of the 2004 international symposium on Low power electronics and design},
  organization = {ACM},
  year        = {2004},
  address     = {New York, NY, USA},
  pages       = {338--343},
  publisher   = {ACM},
  abstract    = {The ever-increasing requirement for high-performance and huge-capacity memories of emerging embedded applications has led to the widespread adoption of SDRAM and NAND flash memory as main and secondary memories, respectively. In particular, the use of energy consuming memory, SDRAM, has become burdensome in battery-powered embedded systems. Intuitively, though demand paging can be used to mitigate the increasing requirement of main memory size, its applicability should be deliberately elaborated since NAND flash memory has asymmetric operation characteristics in terms of performance and energy consumption.In this paper, we present energy-aware demand paging technique to lower the energy consumption of embedded systems considering the characteristics of interactive embedded applications with large memory footprints. We also propose a flash memory-aware page replacement policy that can reduce the number of write and erase operations in NAND flash memory. With real-life workloads, we show the system-wide Energy{\textperiodcentered}Delay can be reduced by 15~30\% compared to the traditional shadowing architecture},
  isbn        = {1-58113-929-2},
  doi         = {10.1145/1013235.1013317},
  url         = {http://doi.acm.org/10.1145/1013235.1013317},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2004-ISLPED-Energy-aware\%20demand\%20paging\%20on\%20NAND\%20flash-based\%20embedded\%20storages.pdf},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned},
}
@article{1015507,
  title       = {{Mercury}: supporting scalable multi-attribute range queries},
  author      = {Bharambe, Ashwin R. and Agrawal, Mukesh and Seshan, Srinivasan},
  journal     = {SIGCOMM Comput. Commun. Rev},
  volume      = {34},
  number      = {4},
  year        = {2004},
  address     = {New York, NY, USA},
  pages       = {353--366},
  publisher   = {ACM},
  abstract    = {This paper presents the design of Mercury, a scalable protocol for supporting multi-attribute range-based searches. Mercury differs from previous range-based query systems in that it supports multiple attributes as well as performs explicit load balancing. To guarantee efficient routing and load balancing, Mercury uses novel light-weight sampling mechanisms for uniformly sampling random nodes in a highly dynamic overlay network. Our evaluation shows that Mercury is able to achieve its goals of logarithmic-hop routing and near-uniform load balancing.We also show that Mercury can be used to solve a key problem for an important class of distributed applications: distributed state maintenance for distributed games. We show that the Mercury-based solution is easy to use, and that it reduces the game's messaging overheard significantly compared to a na{\"\i}ve approach},
  www_section = {distributed hash table, load balancing, mercury, P2P, random sampling, range queries},
  issn        = {0146-4833},
  doi         = {10.1145/1030194.1015507},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1030194.1015507\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p625-bharambe1.pdf},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1021938,
  title       = {Erasure Code Replication Revisited},
  author      = {Lin, W. K. and Chiu, Dah Ming and Lee, Y. B.},
  booktitle   = {P2P '04: Proceedings of the Fourth International Conference on Peer-to-Peer Computing},
  organization = {IEEE Computer Society},
  year        = {2004},
  address     = {Washington, DC, USA},
  pages       = {90--97},
  publisher   = {IEEE Computer Society},
  abstract    = {Erasure coding is a technique for achieving high availability and reliability in storage and communication systems. In this paper, we revisit the analysis of erasure code replication and point out some situations when whole-file replication is preferred. The switchover point (from preferring whole-file replication to erasure code replication) is studied, and characterized using asymptotic analysis. We also discuss the additional considerations in building erasure code replication systems},
  isbn        = {0-7695-2156-8},
  doi         = {10.1109/P2P.2004.17},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1021938\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.109.2034.pdf},
  % Ignored-field annotation (raw "%" text inside an entry is a BibTeX syntax error):
  internal-note = {TODO: www_section not yet assigned},
}
@article{1026492,
  title       = {Basic Concepts and Taxonomy of Dependable and Secure Computing},
  author      = {Avizienis, Algirdas and Laprie, Jean-Claude and Randell, Brian and Carl Landwehr},
  journal     = {IEEE Trans. Dependable Secur. Comput},
  volume      = {1},
  number      = {1},
  year        = {2004},
  address     = {Los Alamitos, CA, USA},
  pages       = {11--33},
  publisher   = {IEEE Computer Society Press},
  abstract    = {This paper gives the main definitions relating to dependability, a generic concept including as special case such attributes as reliability, availability, safety, integrity, maintainability, etc. Security brings in concerns for confidentiality, in addition to availability and integrity. Basic definitions are given first. They are then commented upon, and supplemented by additional definitions, which address the threats to dependability and security (faults, errors, failures), their attributes, and the means for their achievement (fault prevention, fault tolerance, fault removal, fault forecasting). The aim is to explicate a set of general concepts, of relevance across a wide range of situations and, therefore, helping communication and cooperation among a number of scientific and technical communities, including ones that are concentrating on particular types of system, of system failures, or of causes of system failures},
  www_section = {attack, fault removal, fault-tolerance, index terms-dependability, trust, vulnerability},
  issn        = {1545-5971},
  doi         = {10.1109/TDSC.2004.2},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1026488.1026492\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2793.pdf},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1038318,
  title       = {Vulnerabilities and Security Threats in Structured Overlay Networks: A Quantitative Analysis},
  author      = {Srivatsa, Mudhakar and Liu, Ling},
  booktitle   = {ACSAC '04: Proceedings of the 20th Annual Computer Security Applications Conference},
  organization = {IEEE Computer Society},
  year        = {2004},
  address     = {Washington, DC, USA},
  pages       = {252--261},
  publisher   = {IEEE Computer Society},
  abstract    = {A number of recent applications have been built on distributed hash tables (DHTs) based overlay networks. Almost all DHT-based schemes employ a tight deterministic data placement and ID mapping schemes. This feature on one hand provides assurance on location of data if it exists, within a bounded number of hops, and on the other hand, opens doors for malicious nodes to lodge attacks that can potentially thwart the functionality of the overlay network. This paper studies several serious security threats in DHT-based systems through two targeted attacks at the overlay network's protocol layer. The first attack explores the routing anomalies that can be caused by malicious nodes returning incorrect lookup routes. The second attack targets the ID mapping scheme. We disclose that the malicious nodes can target any specific data item in the system; and corrupt/modify the data item to its favor. For each of these attacks, we provide quantitative analysis to estimate the extent of damage that can be caused by the attack; followed by experimental validation and defenses to guard the overlay networks from such attacks},
  www_section = {distributed hash table, overlay networks, P2P},
  isbn        = {0-7695-2252-1},
  doi         = {10.1109/CSAC.2004.50},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1038254.1038318\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.1198.pdf},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1039861,
  title       = {{Burt}: The Backup and Recovery Tool},
  author      = {Melski, Eric},
  booktitle   = {LISA '99: Proceedings of the 13th USENIX conference on System administration},
  organization = {USENIX Association},
  year        = {1999},
  address     = {Berkeley, CA, USA},
  pages       = {207--218},
  publisher   = {USENIX Association},
  abstract    = {Burt is a freely distributed parallel network backup system written at the University of Wisconsin, Madison. It is designed to backup large heterogeneous networks. It uses the Tcl scripting language and standard backup programs like dump(1) and GNUTar to enable backups of a wide variety of data sources, including UNIX and Windows NT workstations, AFS based storage, and others. It also uses Tcl for the creation of the user interface, giving the system administrator great flexibility in customizing the system. Burt supports parallel backups to ensure high backup speeds, and checksums to ensure data integrity. The principal contribution of Burt is that it provides a powerful I/O engine within the context of a flexible scripting language; this combination enables graceful solutions to many problems associated with backups of large installations. At our site, we use Burt to backup data from 350 workstations and from our AFS servers, a total of approximately 900 GB every two weeks},
  www_section = {backup},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1039861\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.112.7612.pdf},
}
@article{1042380,
  title       = {Location Awareness in Unstructured Peer-to-Peer Systems},
  author      = {Yunhao Liu and Xiao, Li and Liu, Xiaomei and Ni, Lionel M. and Zhang, Xiaodong},
  journal     = {IEEE Trans. Parallel Distrib. Syst},
  volume      = {16},
  number      = {2},
  year        = {2005},
  address     = {Piscataway, NJ, USA},
  pages       = {163--174},
  publisher   = {IEEE Press},
  abstract    = {Peer-to-Peer (P2P) computing has emerged as a popular model aiming at further utilizing Internet information and resources. However, the mechanism of peers randomly choosing logical neighbors without any knowledge about underlying physical topology can cause a serious topology mismatch between the P2P overlay network and the physical underlying network. The topology mismatch problem brings great stress in the Internet infrastructure. It greatly limits the performance gain from various search or routing techniques. Meanwhile, due to the inefficient overlay topology, the flooding-based search mechanisms cause a large volume of unnecessary traffic. Aiming at alleviating the mismatching problem and reducing the unnecessary traffic, we propose a location-aware topology matching (LTM) technique. LTM builds an efficient overlay by disconnecting slow connections and choosing physically closer nodes as logical neighbors while still retaining the search scope and reducing response time for queries. LTM is scalable and completely distributed in the sense that it does not require any global knowledge of the whole overlay network. The effectiveness of LTM is demonstrated through simulation studies},
  www_section = {flooding attacks, location-aware topology, P2P, search efficiency, topology matching},
  issn        = {1045-9219},
  doi         = {10.1109/TPDS.2005.21},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1042380\#},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1049775,
  title       = {Scalable Service Discovery for {MANET}},
  author      = {Sailhan, Francoise and Valerie Issarny},
  booktitle   = {PERCOM '05: Proceedings of the Third IEEE International Conference on Pervasive Computing and Communications},
  organization = {IEEE Computer Society},
  year        = {2005},
  address     = {Washington, DC, USA},
  pages       = {235--244},
  publisher   = {IEEE Computer Society},
  % Abstract garbles repaired: "network{\'y}s" was a mis-encoded apostrophe;
  % "generatedtraffic" had a space lost in extraction.
  abstract    = {Mobile Ad hoc NETworks (MANETs) conveniently complement infrastructure-based networks, allowing mobile nodes to spontaneously form a network and share their services, including bridging with other networks, either infrastructure-based or ad hoc. However, distributed service provisioning over MANETs requires adequate support for service discovery and invocation, due to the network's dynamics and resource constraints of wireless nodes. While a number of existing service discovery protocols have shown to be effective for the wireless environment, these are mainly aimed at infrastructure-based and/or 1-hop ad hoc wireless networks. Some discovery protocols for MANETs have been proposed over the last couple of years but they induce significant traffic overhead, and are thus primarily suited for small-scale MANETs with few nodes. Building upon the evaluation of existing protocols, we introduce a scalable service discovery protocol for MANETs, which is based on the homogeneous and dynamic deployment of cooperating directories within the network. Scalability of our protocol comes from the minimization of the generated traffic, and the use of compact directory summaries that enable to efficiently locate the directory that most likely caches the description of a given service},
  www_section = {ad-hoc networks, mobile Ad-hoc networks},
  isbn        = {0-7695-2299-8},
  doi         = {10.1109/PERCOM.2005.36},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1049775\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.73.7247.pdf},
}
@article{1064217,
  title       = {On lifetime-based node failure and stochastic resilience of decentralized peer-to-peer networks},
  author      = {Leonard, Derek and Rai, Vivek and Loguinov, Dmitri},
  journal     = {SIGMETRICS Perform. Eval. Rev},
  volume      = {33},
  number      = {1},
  year        = {2005},
  address     = {New York, NY, USA},
  pages       = {26--37},
  publisher   = {ACM},
  abstract    = {To understand how high rates of churn and random departure decisions of end-users affect connectivity of P2P networks, this paper investigates resilience of random graphs to lifetime-based node failure and derives the expected delay before a user is forcefully isolated from the graph and the probability that this occurs within his/her lifetime. Our results indicate that systems with heavy-tailed lifetime distributions are more resilient than those with light-tailed (e.g., exponential) distributions and that for a given average degree, k-regular graphs exhibit the highest resilience. As a practical illustration of our results, each user in a system with n = 100 billion peers, 30-minute average lifetime, and 1-minute node-replacement delay can stay connected to the graph with probability 1--1 n using only 9 neighbors. This is in contrast to 37 neighbors required under previous modeling efforts. We finish the paper by showing that many P2P networks are almost surely (i.e., with probability 1-o(1)) connected if they have no isolated nodes and derive a simple model for the probability that a P2P system partitions under churn},
  www_section = {P2P, pareto, stochastic lifetime resilience},
  issn        = {0163-5999},
  doi         = {10.1145/1071690.1064217},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1071690.1064217\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.5920.pdf},
}
@article{1076,
  title       = {New directions in cryptography},
  author      = {Whitfield Diffie and Martin E. Hellman},
  journal     = {IEEE Transactions on Information Theory},
  volume      = {22},
  year        = {1976},
  % Use the standard month macro instead of a quoted month name.
  month       = nov,
  pages       = {644--654},
  abstract    = {Two kinds of contemporary developments in cryptography are examined. Widening applications of teleprocessing have given rise to a need for new types of cryptographic systems, which minimize the need for secure key distribution channels and supply the equivalent of a written signature. This paper suggests ways to solve these currently open problems. It also discusses how the theories of communication and computation are beginning to provide the tools to solve cryptographic problems of long standing},
  www_section = {cryptographic systems, cryptography},
  issn        = {0018-9448},
  doi         = {10.1109/TIT.1976.1055638},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Trans.\%20on\%20Info.\%20-\%20New\%20directions\%20in\%20cryptography.pdf},
  url         = {https://bibliography.gnunet.org},
}
% @inproceedings is the preferred name for the @conference alias.
@inproceedings{1080833,
  title       = {Architecture and evaluation of an unplanned 802.11b mesh network},
  author      = {Bicket, John and Aguayo, Daniel and Biswas, Sanjit and Robert Morris},
  booktitle   = {MobiCom '05: Proceedings of the 11th annual international conference on Mobile computing and networking},
  organization = {ACM},
  year        = {2005},
  address     = {New York, NY, USA},
  pages       = {31--42},
  publisher   = {ACM},
  abstract    = {This paper evaluates the ability of a wireless mesh architecture to provide high performance Internet access while demanding little deployment planning or operational management. The architecture considered in this paper has unplanned node placement (rather than planned topology), omni-directional antennas (rather than directional links), and multi-hop routing (rather than single-hop base stations). These design decisions contribute to ease of deployment, an important requirement for community wireless networks. However, this architecture carries the risk that lack of planning might render the network's performance unusably low. For example, it might be necessary to place nodes carefully to ensure connectivity; the omni-directional antennas might provide uselessly short radio ranges; or the inefficiency of multi-hop forwarding might leave some users effectively disconnected.The paper evaluates this unplanned mesh architecture with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37 nodes spread over four square kilometers of an urban area. The network provides users with usable performance despite lack of planning: the average inter-node throughput is 627 kbits/second, even though the average route has three hops.The paper evaluates multiple aspects of the architecture: the effect of node density on connectivity and throughput; the characteristics of the links that the routing protocol elects to use; the usefulness of the highly connected mesh afforded by omni-directional antennas for robustness and throughput; and the potential performance of a single-hop network using the same nodes as Roofnet},
  www_section = {ad-hoc networks, mesh networks, multi-hop networks, route metrics, wireless routing},
  isbn        = {1-59593-020-5},
  doi         = {10.1145/1080829.1080833},
  % "\#" is the correct escape for "#"; "$\#$" wrapped it in math mode.
  url         = {http://portal.acm.org/citation.cfm?id=1080833\#},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.62.3119.pdf},
}
@conference{1090700, title = {Metadata Efficiency in Versioning File Systems}, author = {Soules, Craig A. N. and Goodson, Garth R. and Strunk, John D. and Ganger, Gregory R.}, booktitle = {FAST '03: Proceedings of the 2nd USENIX Conference on File and Storage Technologies}, organization = {USENIX Association}, year = {2003}, address = {Berkeley, CA, USA}, pages = {43--58}, publisher = {USENIX Association}, abstract = {Versioning file systems retain earlier versions of modified files, allowing recovery from user mistakes or system corruption. Unfortunately, conventional versioning systems do not efficiently record large numbers of versions. In particular, versioned metadata can consume as much space as versioned data. This paper examines two space-efficient metadata structures for versioning file systems and describes their integration into the Comprehensive Versioning File System (CVFS), which keeps all versions of all files. Journal-based metadata encodes each metadata version into a single journal entry; CVFS uses this structure for inodes and indirect blocks, reducing the associated space requirements by 80\%. Multiversion b-trees extend each entry's key with a timestamp and keep current and historical entries in a single tree; CVFS uses this structure for directories, reducing the associated space requirements by 99\%. Similar space reductions are predicted via trace analysis for other versioning strategies (e.g., on-close versioning). Experiments with CVFS verify that its current-version performance is similar to that of non-versioning file systems while reducing overall space needed for history data by a factor of two. Although access to historical versions is slower than conventional versioning systems, checkpointing is shown to mitigate and bound this effect}, www_section = {file systems}, url = {http://portal.acm.org/citation.cfm?id=1090694.1090700$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fast03.pdf}, }
@article{1095816, title = {BAR fault tolerance for cooperative services}, author = {Aiyer, Amitanand S. and Lorenzo Alvisi and Clement, Allen and Dahlin, Mike and Martin, Jean-Philippe and Porth, Carl}, journal = {SIGOPS Oper. Syst. Rev}, volume = {39}, number = {5}, year = {2005}, address = {New York, NY, USA}, pages = {45--58}, publisher = {ACM}, abstract = {This paper describes a general approach to constructing cooperative services that span multiple administrative domains. In such environments, protocols must tolerate both Byzantine behaviors when broken, misconfigured, or malicious nodes arbitrarily deviate from their specification and rational behaviors when selfish nodes deviate from their specification to increase their local benefit. The paper makes three contributions: (1) It introduces the BAR (Byzantine, Altruistic, Rational) model as a foundation for reasoning about cooperative services; (2) It proposes a general three-level architecture to reduce the complexity of building services under the BAR model; and (3) It describes an implementation of BAR-B the first cooperative backup service to tolerate both Byzantine users and an unbounded number of rational users. At the core of BAR-B is an asynchronous replicated state machine that provides the customary safety and liveness guarantees despite nodes exhibiting both Byzantine and rational behaviors. Our prototype provides acceptable performance for our application: our BAR-tolerant state machine executes 15 requests per second, and our BAR-B backup service can back up 100MB of data in under 4 minutes}, www_section = {byzantine fault tolerance, game theory, reliability}, issn = {0163-5980}, doi = {10.1145/1095809.1095816}, url = {http://portal.acm.org/citation.cfm?id=1095816$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.80.713.pdf}, }
@conference{1095944, title = {Impacts of packet scheduling and packet loss distribution on FEC Performances: observations and recommendations}, author = {Christoph Neumann and Aur{\'e}lien Francillon and David Furodet}, booktitle = {CoNEXT'05: Proceedings of the 2005 ACM conference on Emerging network experiment and technology}, organization = {ACM Press}, year = {2005}, address = {New York, NY, USA}, pages = {166--176}, publisher = {ACM Press}, abstract = {Forward Error Correction (FEC) is commonly used for content broadcasting. The performance of the FEC codes largely vary, depending in particular on the code used and on the object size, and these parameters have already been studied in detail by the community. However the FEC performances are also largely dependent on the packet scheduling used during transmission and on the loss pattern introduced by the channel. Little attention has been devoted to these aspects so far. Therefore the present paper analyzes their impacts on the three FEC codes: LDGM Staircase, LDGM Triangle, two large block codes, and Reed-Solomon. Thanks to this analysis, we define several recommendations on how to best use these codes, depending on the test case and on the channel, which turns out to be of utmost importance}, www_section = {forward error correction, LDPC, loss pattern, multicast, packet scheduling, Reed-Solomon}, isbn = {1-59593-197-X}, doi = {10.1145/1095921.1095944}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.63.8807}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RR-5578.pdf}, }
@conference{1096703, title = {Integrating Portable and Distributed Storage}, author = {Niraj Tolia and Harkes, Jan and Michael Kozuch and Satyanarayanan, Mahadev}, booktitle = {FAST '04: Proceedings of the 3rd USENIX Conference on File and Storage Technologies}, organization = {USENIX Association}, year = {2004}, address = {Berkeley, CA, USA}, pages = {227--238}, publisher = {USENIX Association}, abstract = {We describe a technique called lookaside caching that combines the strengths of distributed file systems and portable storage devices, while negating their weaknesses. In spite of its simplicity, this technique proves to be powerful and versatile. By unifying distributed storage and portable storage into a single abstraction, lookaside caching allows users to treat devices they carry as merely performance and availability assists for distant file servers. Careless use of portable storage has no catastrophic consequences. Experimental results show that significant performance improvements are possible even in the presence of stale data on the portable device}, www_section = {caching proxies, distributed database}, url = {http://portal.acm.org/citation.cfm?id=1096703$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/integratingpds-fast04.pdf}, }
@conference{1103797, title = {Hydra: a platform for survivable and secure data storage systems}, author = {Lihao Xu}, booktitle = {StorageSS '05: Proceedings of the 2005 ACM workshop on Storage security and survivability}, organization = {ACM}, year = {2005}, address = {New York, NY, USA}, pages = {108--114}, publisher = {ACM}, abstract = {This paper introduces Hydra, a platform that we are developing for highly survivable and secure data storage systems that distribute information over networks and adapt timely to environment changes, enabling users to store and access critical data in a continuously available and highly trustable fashion. The Hydra platform uses MDS array codes that can be encoded and decoded efficiently for distributing and recovering user data. Novel uses of MDS array codes in Hydra are discussed, as well as Hydra's design goals, general structures and a set of basic operations on user data. We also explore Hydra's applications in survivable and secure data storage systems}, www_section = {storage}, isbn = {1-59593-233-X}, doi = {10.1145/1103780.1103797}, url = {http://portal.acm.org/citation.cfm?id=1103797$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/w8paper13.pdf}, }
@conference{1108067, title = {Boundary Chord: A Novel Peer-to-Peer Algorithm for Replica Location Mechanism in Grid Environment}, author = {Jin, Hai and Wang, Chengwei and Chen, Hanhua}, booktitle = {ISPAN '05: Proceedings of the 8th International Symposium on Parallel Architectures, Algorithms and Networks}, organization = {IEEE Computer Society}, year = {2005}, address = {Washington, DC, USA}, pages = {262--267}, publisher = {IEEE Computer Society}, abstract = {The emerging grids need an efficient replica location mechanism. In the experience of developing ChinaGrid Supporting Platform (CGSP), a grid middleware that builds a uniform platform supporting multiple grid-based applications, we meet a challenge of utilizing the properties of locality in replica location process to construct a practical and high performance replica location mechanism. The key of the solution to this challenge is to design an efficient replica location algorithm that meets above requirements. Some previous works have been done to build a replica location mechanism, but they are not suitable for replica location in a grid environment with multiple applications like ChinaGrid. In this paper, we present a novel peer-to-peer algorithm for replica location mechanism, Boundary Chord, which has the merits of locality awareness, self-organization, and load balancing. Simulation results show that the algorithm has better performance than other structured peer-to-peer solutions to the replica location problem}, isbn = {0-7695-2509-1}, doi = {10.1109/ISPAN.2005.21}, url = {http://portal.acm.org/citation.cfm?id=1108067$\#$}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1109601, title = {The rainbow skip graph: a fault-tolerant constant-degree distributed data structure}, author = {Goodrich, Michael T. and Nelson, Michael J. and Sun, Jonathan Z.}, booktitle = {SODA '06: Proceedings of the seventeenth annual ACM-SIAM symposium on Discrete algorithm}, organization = {ACM}, year = {2006}, address = {New York, NY, USA}, pages = {384--393}, publisher = {ACM}, abstract = {We present a distributed data structure, which we call the rainbow skip graph. To our knowledge, this is the first peer-to-peer data structure that simultaneously achieves high fault-tolerance, constant-sized nodes, and fast update and query times for ordered data. It is a non-trivial adaptation of the SkipNet/skip-graph structures of Harvey et al. and Aspnes and Shah, so as to provide fault-tolerance as these structures do, but to do so using constant-sized nodes, as in the family tree structure of Zatloukal and Harvey. It supports successor queries on a set of n items using O(log n) messages with high probability, an improvement over the expected O(log n) messages of the family tree. Our structure achieves these results by using the following new constructs:{\textbullet} Rainbow connections: parallel sets of pointers between related components of nodes, so as to achieve good connectivity between "adjacent" components, using constant-sized nodes.{\textbullet} Hydra components: highly-connected, highly fault-tolerant components of constant-sized nodes, which will contain relatively large connected subcomponents even under the failure of a constant fraction of the nodes in the component.We further augment the hydra components in the rainbow skip graph by using erasure-resilient codes to ensure that any large subcomponent of nodes in a hydra component is sufficient to reconstruct all the data stored in that component. 
By carefully maintaining the size of related components and hydra components to be O(log n), we are able to achieve fast times for updates and queries in the rainbow skip graph. In addition, we show how to make the communication complexity for updates and queries be worst case, at the expense of more conceptual complexity and a slight degradation in the node congestion of the data structure}, www_section = {distributed hash table, Hydra, rainbow, RSG, skip graph, SkipNet}, isbn = {0-89871-605-5}, doi = {10.1145/1109557.1109601}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rainbow.pdf}, }
@conference{1111777, title = {Data durability in peer to peer storage systems}, author = {Gil Utard and Antoine Vernois}, booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on Cluster Computing and the Grid}, organization = {IEEE Computer Society}, year = {2004}, address = {Washington, DC, USA}, pages = {90--97}, publisher = {IEEE Computer Society}, abstract = {In this paper we present a quantitative study of data survival in peer to peer storage systems. We first recall two main redundancy mechanisms: replication and erasure codes, which are used by most peer to peer storage systems like OceanStore, PAST or CFS, to guarantee data durability. Second we characterize peer to peer systems according to a volatility factor (a peer is free to leave the system at anytime) and to an availability factor (a peer is not permanently connected to the system). Third we model the behavior of a system as a Markov chain and analyse the average life time of data (MTTF) according to the volatility and availability factors. We also present the cost of the repair process based on these redundancy schemes to recover failed peers. The conclusion of this study is that when there is no high availability of peers, a simple replication scheme may be more efficient than sophisticated erasure codes}, www_section = {P2P, redundancy, storage}, isbn = {0-7803-8430-X}, url = {http://portal.acm.org/citation.cfm?id=1111777$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.9992.pdf}, }
@conference{1128335, title = {OmniStore: A system for ubiquitous personal storage management}, author = {Alexandros Karypidis and Spyros Lalis}, booktitle = {PERCOM '06: Proceedings of the Fourth Annual IEEE International Conference on Pervasive Computing and Communications}, organization = {IEEE Computer Society}, year = {2006}, address = {Washington, DC, USA}, pages = {136--147}, publisher = {IEEE Computer Society}, abstract = {As personal area networking becomes a reality, the collective management of storage in portable devices such as mobile phones, cameras and music players will grow in importance. The increasing wireless communication capability of such devices makes it possible for them to interact with each other and implement more advanced storage functionality. This paper introduces OmniStore, a system which employs a unified data management approach that integrates portable and backend storage, but also exhibits self-organizing behavior through spontaneous device collaboration}, isbn = {0-7695-2518-0}, doi = {10.1109/PERCOM.2006.40}, url = {http://portal.acm.org/citation.cfm?id=1128335$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.96.4283.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1133613, title = {Defending against eclipse attacks on overlay networks}, author = {Singh, Atul and Miguel Castro and Peter Druschel and Antony Rowstron}, booktitle = {EW 11: Proceedings of the 11th workshop on ACM SIGOPS European workshop}, organization = {ACM}, year = {2004}, address = {New York, NY, USA}, pages = {0--21}, publisher = {ACM}, abstract = {Overlay networks are widely used to deploy functionality at edge nodes without changing network routers. Each node in an overlay network maintains pointers to a set of neighbor nodes. These pointers are used both to maintain the overlay and to implement application functionality, for example, to locate content stored by overlay nodes. If an attacker controls a large fraction of the neighbors of correct nodes, it can "eclipse" correct nodes and prevent correct overlay operation. This Eclipse attack is more general than the Sybil attack. Attackers can use a Sybil attack to launch an Eclipse attack by inventing a large number of seemingly distinct overlay nodes. However, defenses against Sybil attacks do not prevent Eclipse attacks because attackers may manipulate the overlay maintenance algorithm to mount an Eclipse attack. This paper discusses the impact of the Eclipse attack on several types of overlay and it proposes a novel defense that prevents the attack by bounding the degree of overlay nodes. Our defense can be applied to any overlay and it enables secure implementations of overlay optimizations that choose neighbors according to metrics like proximity. We present preliminary results that demonstrate the importance of defending against the Eclipse attack and show that our defense is effective}, www_section = {attack, overlay networks}, doi = {10.1145/1133572.1133613}, url = {http://portal.acm.org/citation.cfm?id=1133572.1133613$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.5727.pdf}, }
@conference{1143660, title = {Estimation based erasure-coding routing in delay tolerant networks}, author = {Liao, Yong and Tan, Kun and Zhang, Zhensheng and Gao, Lixin}, booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless communications and mobile computing}, organization = {ACM}, year = {2006}, address = {New York, NY, USA}, pages = {557--562}, publisher = {ACM}, abstract = {Wireless Delay Tolerant Networks (DTNs) are intermittently connected mobile wireless networks. Some well-known assumptions of traditional networks are no longer true in DTNs, which makes routing in DTNs a challenging problem. We observe that mobile nodes in realistic wireless DTNs may always have some mobility pattern information which can be used to estimate one node's ability to deliver a specific message. This estimation can greatly enhance the routing performance in DTNs. Furthermore, we adopt an alternative way to generate redundancy using erasure coding. With a fixed overhead, the erasure coding can generate a large number of message-blocks instead of a few replications, and therefore it allows the transmission of only a portion of message to a relay. This can greatly increase the routing diversity when combined with estimation-based approaches. We have conducted extensive simulations to evaluate the performance of our scheme. The results demonstrate that our scheme outperforms previously proposed schemes}, www_section = {delay tolerant network}, isbn = {1-59593-306-9}, doi = {10.1145/1143549.1143660}, url = {http://portal.acm.org/citation.cfm?id=1143549.1143660$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.249.pdf}, }
@conference{1143821, title = {A distributed data caching framework for mobile ad hoc networks}, author = {Wang, Ying-Hong and Chao, Chih-Feng and Lin, Shih-Wei and Chen, Wei-Ting}, booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless communications and mobile computing}, organization = {ACM}, year = {2006}, address = {New York, NY, USA}, pages = {1357--1362}, publisher = {ACM}, abstract = {Mobile ad hoc networks (MANETs), enabling multi-hop communication between mobile nodes, are characterized by variable network topology and the demand for efficient dynamic routing protocols. MANETs need no stationary infrastructure or preconstructed base station to coordinate packet transmissions or to advertise information of network topology for mobile nodes. The objective of this paper is to provide MANETs with a distributed data caching framework, which could cache the repetition of data and data path, shorten routes and time span to access data, and enhance data reusable rate to further reduce the use of bandwidth and the consumption of power}, www_section = {mobile Ad-hoc networks}, isbn = {1-59593-306-9}, doi = {10.1145/1143549.1143821}, url = {http://portal.acm.org/citation.cfm?id=1143821$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.426.pdf}, }
@article{1148681, title = {Raptor codes}, author = {M. Amin Shokrollahi}, journal = {IEEE/ACM Trans. Netw}, volume = {14}, number = {SI}, year = {2006}, address = {Piscataway, NJ, USA}, pages = {2551--2567}, publisher = {IEEE Press}, abstract = {LT-codes are a new class of codes introduced by Luby for the purpose of scalable and fault-tolerant distribution of data over computer networks. In this paper, we introduce Raptor codes, an extension of LT-codes with linear time encoding and decoding. We will exhibit a class of universal Raptor codes: for a given integer k and any real {\epsilon} > 0, Raptor codes in this class produce a potentially infinite stream of symbols such that any subset of symbols of size k(1 + {\epsilon}) is sufficient to recover the original k symbols with high probability. Each output symbol is generated using O(log(1/ {\epsilon})) operations, and the original symbols are recovered from the collected ones with O(k log(1/{\epsilon})) operations.We will also introduce novel techniques for the analysis of the error probability of the decoder for finite length Raptor codes. Moreover, we will introduce and analyze systematic versions of Raptor codes, i.e., versions in which the first output elements of the coding system coincide with the original k elements}, www_section = {802.11, encoding, erasure coding}, issn = {1063-6692}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raptor.pdf}, }
@article{1151692, title = {Energy-aware lossless data compression}, author = {Kenneth Barr and Asanovi{\'c}, Krste}, journal = {ACM Trans. Comput. Syst}, volume = {24}, number = {3}, year = {2006}, month = jan, address = {New York, NY, USA}, pages = {250--291}, publisher = {ACM}, abstract = {Wireless transmission of a single bit can require over 1000 times more energy than a single computation. It can therefore be beneficial to perform additional computation to reduce the number of bits transmitted. If the energy required to compress data is less than the energy required to send it, there is a net energy savings and an increase in battery life for portable computers. This article presents a study of the energy savings possible by losslessly compressing data prior to transmission. A variety of algorithms were measured on a StrongARM SA-110 processor. This work demonstrates that, with several typical compression algorithms, there is actually a net energy increase when compression is applied before transmission. Reasons for this increase are explained and suggestions are made to avoid it. One such energy-aware suggestion is asymmetric compression, the use of one compression algorithm on the transmit side and a different algorithm for the receive path. By choosing the lowest-energy compressor and decompressor on the test platform, overall energy to send and receive data can be reduced by 11\% compared with a well-chosen symmetric pair, or up to 57\% over the default symmetric zlib scheme}, www_section = {compression, energy-aware, lossless}, issn = {0734-2071}, doi = {10.1145/1151690.1151692}, url = {http://portal.acm.org/citation.cfm?id=1151692$\#$}, }
@conference{1157518, title = {iDIBS: An Improved Distributed Backup System}, author = {Morcos, Faruck and Chantem, Thidapat and Little, Philip and Gasiba, Tiago and Thain, Douglas}, booktitle = {ICPADS '06: Proceedings of the 12th International Conference on Parallel and Distributed Systems}, organization = {IEEE Computer Society}, year = {2006}, address = {Washington, DC, USA}, pages = {58--67}, publisher = {IEEE Computer Society}, abstract = {iDIBS is a peer-to-peer backup system which optimizes the Distributed Internet Backup System (DIBS). iDIBS offers increased reliability by enhancing the robustness of existing packet transmission mechanism. Reed-Solomon erasure codes are replaced with Luby Transform codes to improve computation speed and scalability of large files. Lists of peers are automatically stored onto nodes to reduce recovery time. To realize these optimizations, an acceptable amount of data overhead and an increase in network utilization are imposed on the iDIBS system. Through a variety of experiments, we demonstrate that iDIBS significantly outperforms DIBS in the areas of data computational complexity, backup reliability, and overall performance}, www_section = {backup, P2P, reliability}, isbn = {0-7695-2612-8}, doi = {10.1109/ICPADS.2006.52}, url = {http://portal.acm.org/citation.cfm?id=1156431.1157518$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.94.4826.pdf}, }
@conference{1158641, title = {Security Considerations in Space and Delay Tolerant Networks}, author = {Farrell, Stephen and Cahill, Vinny}, booktitle = {SMC-IT '06: Proceedings of the 2nd IEEE International Conference on Space Mission Challenges for Information Technology}, organization = {IEEE Computer Society}, year = {2006}, address = {Washington, DC, USA}, pages = {29--38}, publisher = {IEEE Computer Society}, abstract = {This paper reviews the Internet-inspired security work on delay tolerant networking, in particular, as it might apply to space missions, and identifies some challenges arising, for both the Internet security community and for space missions. These challenges include the development of key management schemes suited for space missions as well as a characterization of the actual security requirements applying. A specific goal of this paper is therefore to elicit feedback from space mission IT specialists in order to guide the development of security mechanisms for delay tolerant networking}, isbn = {0-7695-2644-6}, doi = {10.1109/SMC-IT.2006.66}, url = {http://portal.acm.org/citation.cfm?id=1158336.1158641$\#$}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{1159937, title = {Building an AS-topology model that captures route diversity}, author = {M{\"u}hlbauer, Wolfgang and Feldmann, Anja and Maennel, Olaf and Roughan, Matthew and Uhlig, Steve}, journal = {SIGCOMM Comput. Commun. Rev}, volume = {36}, number = {4}, year = {2006}, address = {New York, NY, USA}, pages = {195--206}, publisher = {ACM}, abstract = {An understanding of the topological structure of the Internet is needed for quite a number of networking tasks, e. g., making decisions about peering relationships, choice of upstream providers, inter-domain traffic engineering. One essential component of these tasks is the ability to predict routes in the Internet. However, the Internet is composed of a large number of independent autonomous systems (ASes) resulting in complex interactions, and until now no model of the Internet has succeeded in producing predictions of acceptable accuracy. We demonstrate that there are two limitations of prior models: (i) they have all assumed that an Autonomous System (AS) is an atomic structure--it is not, and (ii) models have tended to oversimplify the relationships between ASes. Our approach uses multiple quasi-routers to capture route diversity within the ASes, and is deliberately agnostic regarding the types of relationships between ASes. The resulting model ensures that its routing is consistent with the observed routes. Exploiting a large number of observation points, we show that our model provides accurate predictions for unobserved routes, a first step towards developing structural models of the Internet that enable real applications}, www_section = {border gateway protocol, inter-domain routing, route diversity, routing}, issn = {0146-4833}, doi = {10.1145/1151659.1159937}, url = {http://portal.acm.org/citation.cfm?id=1159937$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BuildingAnASTopologyModel.pdf}, }
@conference{1161264, title = {Performance evaluation of chord in mobile ad hoc networks}, author = {Cramer, Curt and Thomas Fuhrmann}, booktitle = {MobiShare '06: Proceedings of the 1st international workshop on Decentralized resource sharing in mobile computing and networking}, organization = {ACM}, year = {2006}, address = {New York, NY, USA}, pages = {48--53}, publisher = {ACM}, abstract = {Mobile peer-to-peer applications recently have received growing interest. However, it is often assumed that structured peer-to-peer overlays cannot efficiently operate in mobile ad hoc networks (MANETs). The prevailing opinion is that this is due to the protocols' high overhead cost. In this paper, we show that this opinion is misguided.We present a thorough simulation study evaluating Chord in the well-known MANET simulator GloMoSim. We found the main issue of deploying Chord in a MANET not to be its overhead, but rather the protocol's pessimistic timeout and failover strategy. This strategy enables fast lookup resolution in spite of highly dynamic node membership, which is a significant problem in the Internet context. However, with the inherently higher packet loss rate in a MANET, this failover strategy results in lookups being inconsistently forwarded even if node membership does not change}, www_section = {Chord, mobile Ad-hoc networks}, isbn = {1-59593-558-4}, doi = {10.1145/1161252.1161264}, url = {http://portal.acm.org/citation.cfm?id=1161264$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p48-cramer_ACM2006.pdf}, }
@conference{1170307, title = {Storage Tradeoffs in a Collaborative Backup Service for Mobile Devices}, author = {Ludovic Court{\`e}s and Killijian, Marc-Olivier and Powell, David}, booktitle = {EDCC '06: Proceedings of the Sixth European Dependable Computing Conference}, organization = {IEEE Computer Society}, year = {2006}, address = {Washington, DC, USA}, pages = {129--138}, publisher = {IEEE Computer Society}, abstract = {Mobile devices are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. We consider a fault-tolerance approach that exploits spontaneous interactions to implement a collaborative backup service. We define the constraints implied by the mobile environment, analyze how they translate into the storage layer of such a backup system and examine various design options. The paper concludes with a presentation of our prototype implementation of the storage layer, an evaluation of the impact of several compression methods, and directions for future work}, isbn = {0-7695-2648-9}, doi = {10.1109/EDCC.2006.26}, url = {http://portal.acm.org/citation.cfm?id=1170307$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/slides.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{1217937, title = {Fireflies: scalable support for intrusion-tolerant network overlays}, author = {H{\r a}vard Johansen and Allavena, Andr{\'e} and Robbert Van Renesse}, journal = {SIGOPS Oper. Syst. Rev}, volume = {40}, number = {4}, year = {2006}, address = {New York, NY, USA}, pages = {3--13}, publisher = {ACM}, abstract = {This paper describes and evaluates Fireflies, a scalable protocol for supporting intrusion-tolerant network overlays. While such a protocol cannot distinguish Byzantine nodes from correct nodes in general, Fireflies provides correct nodes with a reasonably current view of which nodes are live, as well as a pseudo-random mesh for communication. The amount of data sent by correct nodes grows linearly with the aggregate rate of failures and recoveries, even if provoked by Byzantine nodes. The set of correct nodes form a connected submesh; correct nodes cannot be eclipsed by Byzantine nodes. Fireflies is deployed and evaluated on PlanetLab}, issn = {0163-5980}, doi = {10.1145/1218063.1217937}, url = {http://portal.acm.org/citation.cfm?id=1218063.1217937$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fireflies.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{1217950, title = {Experiences in building and operating ePOST, a reliable peer-to-peer application}, author = {Mislove, Alan and Post, Ansley and Haeberlen, Andreas and Peter Druschel}, journal = {SIGOPS Oper. Syst. Rev}, volume = {40}, number = {4}, year = {2006}, address = {New York, NY, USA}, pages = {147--159}, publisher = {ACM}, abstract = {Peer-to-peer (p2p) technology can potentially be used to build highly reliable applications without a single point of failure. However, most of the existing applications, such as file sharing or web caching, have only moderate reliability demands. Without a challenging proving ground, it remains unclear whether the full potential of p2p systems can be realized.To provide such a proving ground, we have designed, deployed and operated a p2p-based email system. We chose email because users depend on it for their daily work and therefore place high demands on the availability and reliability of the service, as well as the durability, integrity, authenticity and privacy of their email. Our system, ePOST, has been actively used by a small group of participants for over two years.In this paper, we report the problems and pitfalls we encountered in this process. We were able to address some of them by applying known principles of system design, while others turned out to be novel and fundamental, requiring us to devise new solutions. Our findings can be used to guide the design of future reliable p2p systems and provide interesting new directions for future research}, www_section = {P2P}, issn = {0163-5980}, doi = {10.1145/1218063.1217950}, url = {http://portal.acm.org/citation.cfm?id=1218063.1217950$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/epost-eurosys2006.pdf}, }
@conference{1247343, title = {A cooperative internet backup scheme}, author = {Mark Lillibridge and Sameh Elnikety and Andrew D. Birrell and Mike Burrows and Isard, Michael}, booktitle = {ATEC '03: Proceedings of the annual conference on USENIX Annual Technical Conference}, organization = {USENIX Association}, year = {2003}, address = {Berkeley, CA, USA}, pages = {3--3}, publisher = {USENIX Association}, abstract = {We present a novel peer-to-peer backup technique that allows computers connected to the Internet to back up their data cooperatively: Each computer has a set of partner computers, which collectively hold its backup data. In return, it holds a part of each partner's backup data. By adding redundancy and distributing the backup data across many partners, a highly-reliable backup can be obtained in spite of the low reliability of the average Internet machine. Because our scheme requires cooperation, it is potentially vulnerable to several novel attacks involving free riding (e.g., holding a partner's data is costly, which tempts cheating) or disruption. We defend against these attacks using a number of new methods, including the use of periodic random challenges to ensure partners continue to hold data and the use of disk-space wasting to make cheating unprofitable. Results from an initial prototype show that our technique is feasible and very inexpensive: it appears to be one to two orders of magnitude cheaper than existing Internet backup services}, www_section = {backup, P2P, redundancy}, url = {http://portal.acm.org/citation.cfm?id=1247343$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lillibridge.pdf}, }
@conference{1247420, title = {Redundancy elimination within large collections of files}, author = {Kulkarni, Purushottam and Douglis, Fred and Jason Lavoie and Tracey, John M.}, booktitle = {ATEC '04: Proceedings of the annual conference on USENIX Annual Technical Conference}, organization = {USENIX Association}, year = {2004}, address = {Berkeley, CA, USA}, pages = {5--5}, publisher = {USENIX Association}, abstract = {Ongoing advancements in technology lead to ever-increasing storage capacities. In spite of this, optimizing storage usage can still provide rich dividends. Several techniques based on delta-encoding and duplicate block suppression have been shown to reduce storage overheads, with varying requirements for resources such as computation and memory. We propose a new scheme for storage reduction that reduces data sizes with an effectiveness comparable to the more expensive techniques, but at a cost comparable to the faster but less effective ones. The scheme, called Redundancy Elimination at the Block Level (REBL), leverages the benefits of compression, duplicate block suppression, and delta-encoding to eliminate a broad spectrum of redundant data in a scalable and efficient manner. REBL generally encodes more compactly than compression (up to a factor of 14) and a combination of compression and duplicate suppression (up to a factor of 6.7). REBL also encodes similarly to a technique based on delta-encoding, reducing overall space significantly in one case. Furthermore, REBL uses super-fingerprints, a technique that reduces the data needed to identify similar blocks while dramatically reducing the computational requirements of matching the blocks: it turns O(n2) comparisons into hash table lookups. 
As a result, using super-fingerprints to avoid enumerating matching data objects decreases computation in the resemblance detection phase of REBL by up to a couple orders of magnitude}, url = {http://portal.acm.org/citation.cfm?id=1247420$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8331.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{1250746, title = {Valgrind: a framework for heavyweight dynamic binary instrumentation}, author = {Nethercote, Nicholas and Seward, Julian}, journal = {SIGPLAN Not}, volume = {42}, number = {6}, year = {2007}, address = {New York, NY, USA}, pages = {89--100}, publisher = {ACM}, abstract = {Dynamic binary instrumentation (DBI) frameworks make it easy to build dynamic binary analysis (DBA) tools such as checkers and profilers. Much of the focus on DBI frameworks has been on performance; little attention has been paid to their capabilities. As a result, we believe the potential of DBI has not been fully exploited. In this paper we describe Valgrind, a DBI framework designed for building heavyweight DBA tools. We focus on its unique support for shadow values-a powerful but previously little-studied and difficult-to-implement DBA technique, which requires a tool to shadow every register and memory value with another value that describes it. This support accounts for several crucial design features that distinguish Valgrind from other DBI frameworks. Because of these features, lightweight tools built with Valgrind run comparatively slowly, but Valgrind can be used to build more interesting, heavyweight tools that are difficult or impossible to build with other DBI frameworks such as Pin and DynamoRIO}, www_section = {dynamic binary instrumentation}, issn = {0362-1340}, doi = {10.1145/1273442.1250746}, url = {http://portal.acm.org/citation.cfm?id=1250746}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.4263.pdf}, }
@conference{1251057, title = {An analysis of compare-by-hash}, author = {Henson, Val}, booktitle = {HOTOS'03: Proceedings of the 9th conference on Hot Topics in Operating Systems}, organization = {USENIX Association}, year = {2003}, address = {Berkeley, CA, USA}, pages = {3--3}, publisher = {USENIX Association}, abstract = {Recent research has produced a new and perhaps dangerous technique for uniquely identifying blocks that I will call compare-by-hash. Using this technique, we decide whether two blocks are identical to each other by comparing their hash values, using a collision-resistant hash such as SHA-1[5]. If the hash values match, we assume the blocks are identical without further ado. Users of compare-by-hash argue that this assumption is warranted because the chance of a hash collision between any two randomly generated blocks is estimated to be many orders of magnitude smaller than the chance of many kinds of hardware errors. Further analysis shows that this approach is not as risk-free as it seems at first glance}, url = {http://portal.acm.org/citation.cfm?id=1251057$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.100.8338.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1251194, title = {Operating system support for planetary-scale network services}, author = {Bavier, Andy and Bowman, Mic and Chun, Brent and Culler, David and Karlin, Scott and Muir, Steve and Peterson, Larry and Roscoe, Timothy and Spalink, Tammo and Wawrzoniak, Mike}, booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation}, organization = {USENIX Association}, year = {2004}, address = {Berkeley, CA, USA}, pages = {19--19}, publisher = {USENIX Association}, abstract = {PlanetLab is a geographically distributed overlay network designed to support the deployment and evaluation of planetary-scale network services. Two high-level goals shape its design. First, to enable a large research community to share the infrastructure, PlanetLab provides distributed virtualization, whereby each service runs in an isolated slice of PlanetLab's global resources. Second, to support competition among multiple network services, PlanetLab decouples the operating system running on each node from the network-wide services that define PlanetLab, a principle referred to as unbundled management. This paper describes how Planet-Lab realizes the goals of distributed virtualization and unbundled management, with a focus on the OS running on each node}, www_section = {overlay networks}, url = {http://portal.acm.org/citation.cfm?id=1251175.1251194$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/plos_nsdi_04.pdf}, }
@conference{1251195, title = {MACEDON: methodology for automatically creating, evaluating, and designing overlay networks}, author = {Rodriguez, Adolfo and Killian, Charles and Bhat, Sooraj and Kosti{\'c}, Dejan and Vahdat, Amin}, booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation}, organization = {USENIX Association}, year = {2004}, address = {Berkeley, CA, USA}, pages = {20--20}, publisher = {USENIX Association}, abstract = {Currently, researchers designing and implementing large-scale overlay services employ disparate techniques at each stage in the production cycle: design, implementation, experimentation, and evaluation. As a result, complex and tedious tasks are often duplicated leading to ineffective resource use and difficulty in fairly comparing competing algorithms. In this paper, we present MACEDON, an infrastructure that provides facilities to: i) specify distributed algorithms in a concise domain-specific language; ii) generate code that executes in popular evaluation infrastructures and in live networks; iii) leverage an overlay-generic API to simplify the interoperability of algorithm implementations and applications; and iv) enable consistent experimental evaluation. We have used MACEDON to implement and evaluate a number of algorithms, including AMMO, Bullet, Chord, NICE, Overcast, Pastry, Scribe, and SplitStream, typically with only a few hundred lines of MACEDON code. Using our infrastructure, we are able to accurately reproduce or exceed published results and behavior demonstrated by current publicly available implementations}, url = {http://portal.acm.org/citation.cfm?id=1251175.1251195$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.2.8796.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1251207, title = {Detecting BGP configuration faults with static analysis}, author = {Nick Feamster and Hari Balakrishnan}, booktitle = {NSDI'05: Proceedings of the 2nd conference on Symposium on Networked Systems Design \& Implementation}, organization = {USENIX Association}, year = {2005}, address = {Berkeley, CA, USA}, pages = {43--56}, publisher = {USENIX Association}, abstract = {The Internet is composed of many independent autonomous systems (ASes) that exchange reachability information to destinations using the Border Gateway Protocol (BGP). Network operators in each AS configure BGP routers to control the routes that are learned, selected, and announced to other routers. Faults in BGP configuration can cause forwarding loops, packet loss, and unintended paths between hosts, each of which constitutes a failure of the Internet routing infrastructure. This paper describes the design and implementation of rcc, the router configuration checker, a tool that finds faults in BGP configurations using static analysis. rcc detects faults by checking constraints that are based on a high-level correctness specification. rcc detects two broad classes of faults: route validity faults, where routers may learn routes that do not correspond to usable paths, and path visibility faults, where routers may fail to learn routes for paths that exist in the network. rcc enables network operators to test and debug configurations before deploying them in an operational network, improving on the status quo where most faults are detected only during operation. rcc has been downloaded by more than sixty-five network operators to date, some of whom have shared their configurations with us. 
We analyze network-wide configurations from 17 different ASes to detect a wide variety of faults and use these findings to motivate improvements to the Internet routing infrastructure}, www_section = {autonomous systems, border gateway protocol}, url = {http://portal.acm.org/citation.cfm?id=1251207$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.113.5668.pdf}, }
@conference{1251279, title = {Energy-efficiency and storage flexibility in the blue file system}, author = {Nightingale, Edmund B. and Flinn, Jason}, booktitle = {OSDI'04: Proceedings of the 6th conference on Symposium on Opearting Systems Design \& Implementation}, organization = {USENIX Association}, year = {2004}, address = {Berkeley, CA, USA}, pages = {25--25}, publisher = {USENIX Association}, abstract = {A fundamental vision driving pervasive computing research is access to personal and shared data anywhere at anytime. In many ways, this vision is close to being realized. Wireless networks such as 802.11 offer connectivity to small, mobile devices. Portable storage, such as mobile disks and USB keychains, let users carry several gigabytes of data in their pockets. Yet, at least three substantial barriers to pervasive data access remain. First, power-hungry network and storage devices tax the limited battery capacity of mobile computers. Second, the danger of viewing stale data or making inconsistent updates grows as objects are replicated across more computers and portable storage devices. Third, mobile data access performance can suffer due to variable storage access times caused by dynamic power management, mobility, and use of heterogeneous storage devices. To overcome these barriers, we have built a new distributed file system called BlueFS. Compared to the Coda file system, BlueFS reduces file system energy usage by up to 55\% and provides up to 3 times faster access to data replicated on portable storage}, www_section = {802.11, file systems}, url = {http://portal.acm.org/citation.cfm?id=1251279$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nightingale-bluefs2004.pdf}, }
@conference{1251470, title = {Symphony: distributed hashing in a small world}, author = {Manku, Gurmeet Singh and Bawa, Mayank and Prabhakar Raghavan}, booktitle = {USITS'03: Proceedings of the 4th conference on USENIX Symposium on Internet Technologies and Systems}, organization = {USENIX Association}, year = {2003}, address = {Berkeley, CA, USA}, pages = {10--10}, publisher = {USENIX Association}, abstract = {We present Symphony, a novel protocol for maintaining distributed hash tables in a wide area network. The key idea is to arrange all participants along a ring and equip them with long distance contacts drawn from a family of harmonic distributions. Through simulation, we demonstrate that our construction is scalable, flexible, stable in the presence of frequent updates and offers small average latency with only a handful of long distance links per node. The cost of updates when hosts join and leave is small}, www_section = {small-world}, url = {http://portal.acm.org/citation.cfm?id=1251460.1251470$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/manku03symphony.pdf}, }
@conference{1251532, title = {Non-transitive connectivity and DHTs}, author = {Michael J. Freedman and Lakshminarayanan, Karthik and Rhea, Sean C. and Ion Stoica}, booktitle = {WORLDS'05: Proceedings of the 2nd conference on Real, Large Distributed Systems}, organization = {USENIX Association}, year = {2005}, address = {Berkeley, CA, USA}, pages = {55--60}, publisher = {USENIX Association}, abstract = {The most basic functionality of a distributed hash table, or DHT, is to partition a key space across the set of nodes in a distributed system such that all nodes agree on the partitioning. For example, the Chord DHT assigns each node}, www_section = {Chord, distributed hash table}, url = {http://portal.acm.org/citation.cfm?id=1251532$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ntr-worlds05.pdf}, }
@conference{1267093, title = {File system design for an NFS file server appliance}, author = {Hitz, Dave and Lau, James and Malcolm, Michael}, booktitle = {WTEC'94: Proceedings of the USENIX Winter 1994 Technical Conference on USENIX Winter 1994 Technical Conference}, organization = {USENIX Association}, year = {1994}, address = {Berkeley, CA, USA}, pages = {19--19}, publisher = {USENIX Association}, abstract = {Network Appliance Corporation recently began shipping a new kind of network server called an NFS file server appliance, which is a dedicated server whose sole function is to provide NFS file service. The file system requirements for an NFS appliance are different from those for a general-purpose UNIX system, both because an NFS appliance must be optimized for network file access and because an appliance must be easy to use. This paper describes WAFL (Write Anywhere File Layout), which is a file system designed specifically to work in an NFS appliance. The primary focus is on the algorithms and data structures that WAFL uses to implement Snapshotst, which are read-only clones of the active file system. WAFL uses a copy-on-write technique to minimize the disk space that Snapshots consume. This paper also describes how WAFL uses Snapshots to eliminate the need for file system consistency checking after an unclean shutdown}, url = {http://portal.acm.org/citation.cfm?id=1267093$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.3691.pdf}, www_section = {Unsorted}, }
@conference{1267366, title = {Compare-by-hash: a reasoned analysis}, author = {Black, John}, booktitle = {ATEC '06: Proceedings of the annual conference on USENIX '06 Annual Technical Conference}, organization = {USENIX Association}, year = {2006}, address = {Berkeley, CA, USA}, pages = {7--7}, publisher = {USENIX Association}, abstract = {Compare-by-hash is the now-common practice used by systems designers who assume that when the digest of a cryptographic hash function is equal on two distinct files, then those files are identical. This approach has been used in both real projects and in research efforts (for example rysnc [16] and LBFS [12]). A recent paper by Henson criticized this practice [8]. The present paper revisits the topic from an advocate's standpoint: we claim that compare-by-hash is completely reasonable, and we offer various arguments in support of this viewpoint in addition to addressing concerns raised by Henson}, url = {http://portal.acm.org/citation.cfm?id=1267366$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.125.4474.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1267576, title = {Establishing identity without certification authorities}, author = {Ellison, Carl M.}, booktitle = {SSYM'96: Proceedings of the 6th conference on USENIX Security Symposium, Focusing on Applications of Cryptography}, organization = {USENIX Association}, year = {1996}, address = {Berkeley, CA, USA}, pages = {7--7}, publisher = {USENIX Association}, abstract = {this paper is that a traditional identity certificate is neither necessary nor sufficient for this purpose. It is especially useless if the two parties concerned did not have the foresight to obtain such certificates before desiring to open a secure channel. There are many methods for establishing identity without using certificates from trusted certification authorities. The relationship between verifier and subject guides the choice of method. Many of these relationships have easy, straight-forward methods for binding a public key to an identity, using a broadcast channel or 1:1 meetings, but one relationship makes it especially difficult. That relationship is one with an old friend with whom you had lost touch but who appears now to be available on the net. You make contact and share a few exchanges which suggest to you that this is, indeed, your old friend. Then you want to form a secure channel in order to carry on a more extensive conversation in private. This case is subject to the man-in-themiddle attack. For this case, a protocol is presented which binds a pair of identities to a pair of public keys without using any certificates issued by a trusted CA. The apparent direct conflict between conventional wisdom and the thesis of this paper lies in the definition of the word "identity" -- a word which is commonly left undefined in discussions of certification}, www_section = {certificate revocation, public key cryptography}, url = {http://portal.acm.org/citation.cfm?id=1267576$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.31.7263.pdf}, }
@conference{1268712, title = {Operation-based update propagation in a mobile file system}, author = {Lee, Yui-Wah and Leung, Kwong-Sak and Satyanarayanan, Mahadev}, booktitle = {ATEC '99: Proceedings of the annual conference on USENIX Annual Technical Conference}, organization = {USENIX Association}, year = {1999}, address = {Berkeley, CA, USA}, pages = {4--4}, publisher = {USENIX Association}, abstract = {In this paper we describe a technique called operation-based update propagation for efficiently transmitting updates to large files that have been modified on a weakly connected client of a distributed file system. In this technique, modifications are captured above the file-system layer at the client, shipped to a surrogate client that is strongly connected to a server, re-executed at the surrogate, and the resulting files transmitted from the surrogate to the server. If re-execution fails to produce a file identical to the original, the system falls back to shipping the file from the client over the slow network. We have implemented a prototype of this mechanism in the Coda File System on Linux, and demonstrated performance improvements ranging from 40 percents to nearly three orders of magnitude in reduced network traffic and elapsed time. We also found a novel use of forward error correction in this context}, url = {http://portal.acm.org/citation.cfm?id=1268712$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lee.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{1270971, title = {Towards Fair Event Dissemination}, author = {Baehni, Sebastien and Rachid Guerraoui and Boris Koldehofe and Monod, Maxime}, booktitle = {ICDCSW '07: Proceedings of the 27th International Conference on Distributed Computing Systems Workshops}, organization = {IEEE Computer Society}, year = {2007}, address = {Washington, DC, USA}, pages = {0--63}, publisher = {IEEE Computer Society}, abstract = {Event dissemination in large scale dynamic systems is typically claimed to be best achieved using decentralized peer-to-peer architectures. The rationale is to have every participant in the system act both as a client (information consumer) and as a server (information dissemination enabler), thus, precluding specific brokers which would prevent scalability and fault-tolerance. We argue that, for such decentralized architectures to be really meaningful, participants should serve the system as much as they benefit from it. That is, the system should be fair in the sense that the extend to which a participant acts as a server should depend on the extend to which it has the opportunity to act as a client. This is particularly crucial in selective information dissemination schemes where clients are not all interested in the same information. In this position paper, we discuss what a notion of fairness could look like, explain why current architectures are not fair, and raise several challenges towards achieving fairness}, isbn = {0-7695-2838-4}, doi = {10.1109/ICDCSW.2007.83}, url = {http://portal.acm.org/citation.cfm?id=1270388.1270971$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.9758.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{1273222, title = {Privacy protection in personalized search}, author = {Shen, Xuehua and Tan, Bin and Zhai, ChengXiang}, journal = {SIGIR Forum}, volume = {41}, number = {1}, year = {2007}, address = {New York, NY, USA}, pages = {4--17}, publisher = {ACM}, abstract = {Personalized search is a promising way to improve the accuracy of web search, and has been attracting much attention recently. However, effective personalized search requires collecting and aggregating user information, which often raise serious concerns of privacy infringement for many users. Indeed, these concerns have become one of the main barriers for deploying personalized search applications, and how to do privacy-preserving personalization is a great challenge. In this paper, we systematically examine the issue of privacy preservation in personalized search. We distinguish and define four levels of privacy protection, and analyze various software architectures for personalized search. We show that client-side personalization has advantages over the existing server-side personalized search services in preserving privacy, and envision possible future strategies to fully protect user privacy}, www_section = {privacy, search}, issn = {0163-5840}, doi = {10.1145/1273221.1273222}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2007j_sigirforum_shen.pdf}, }
@article{1273450, title = {On compact routing for the internet}, author = {Krioukov, Dmitri and Fall, Kevin and Brady, Arthur}, journal = {SIGCOMM Comput. Commun. Rev}, volume = {37}, number = {3}, year = {2007}, address = {New York, NY, USA}, pages = {41--52}, publisher = {ACM}, abstract = {The Internet's routing system is facing stresses due to its poor fundamental scaling properties. Compact routing is a research field that studies fundamental limits of routing scalability and designs algorithms that try to meet these limits. In particular, compact routing research shows that shortest-path routing, forming a core of traditional routing algorithms, cannot guarantee routing table (RT) sizes that on all network topologies grow slower than linearly as functions of the network size. However, there are plenty of compact routing schemes that relax the shortest-path requirement and allow for improved, sublinear RT size scaling that is mathematically provable for all static network topologies. In particular, there exist compact routing schemes designed for grids, trees, and Internet-like topologies that offer RT sizes that scale logarithmically with the network size. In this paper, we demonstrate that in view of recent results in compact routing research, such logarithmic scaling on Internet-like topologies is fundamentally impossible in the presence of topology dynamics or topology-independent (flat) addressing. We use analytic arguments to show that the number of routing control messages per topology change cannot scale better than linearly on Internet-like topologies. We also employ simulations to confirm that logarithmic RT size scaling gets broken by topology-independent addressing, a cornerstone of popular locator-identifier split proposals aiming at improving routing scaling in the presence of network topology dynamics or host mobility. 
These pessimistic findings lead us to the conclusion that a fundamental re-examination of assumptions behind routing models and abstractions is needed in order to find a routing architecture that would be able to scale "indefinitely}, www_section = {compact routing, internet routing, routing scalability}, issn = {0146-4833}, doi = {10.1145/1273445.1273450}, url = {http://portal.acm.org/citation.cfm?id=1273450$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.5763.pdf}, }
@article{1290327, title = {On improving the efficiency of truthful routing in MANETs with selfish nodes}, author = {Wang, Yongwei and Singhal, Mukesh}, journal = {Pervasive Mob. Comput}, volume = {3}, number = {5}, year = {2007}, address = {Amsterdam, The Netherlands, The Netherlands}, pages = {537--559}, publisher = {Elsevier Science Publishers B. V}, abstract = {In Mobile Ad Hoc Networks (MANETs), nodes depend upon each other for routing and forwarding packets. However, nodes belonging to independent authorities in MANETs may behave selfishly and may not forward packets to save battery and other resources. To stimulate cooperation, nodes are rewarded for their forwarding service. Since nodes spend different cost to forward packets, it is desirable to reimburse nodes according to their cost so that nodes get incentive while the least total payment is charged to the sender. However, to maximize their utility, nodes may tell lie about their cost. This poses the requirement of truthful protocols, which maximizes the utility of nodes only when they declare their true cost. Anderegg and Eidenbenz recently proposed a truthful routing protocol, named ad hoc-VCG. This protocol incurs the route discovery overhead of O(n3), where n is the number of nodes in the network. This routing overhead is likely to become prohibitively large as the network size grows. Moreover, it leads to low network performance due to congestion and interference. We present a low-overhead truthful routing protocol for route discovery in MANETs with selfish nodes by applying mechanism design. The protocol, named LOTTO (Low Overhead Truthful rouTing prOtocol), finds a least cost path for data forwarding with a lower routing overhead of O(n2). We conduct an extensive simulation study to evaluate the performance of our protocol and compare it with ad hoc-VCG. 
Simulation results show that our protocol provides a much higher packet delivery ratio, generates much lower overhead and has much lower end-to-end delay}, www_section = {mobile Ad-hoc networks, routing, VCG mechanism}, issn = {1574-1192}, doi = {10.1016/j.pmcj.2007.02.001}, url = {http://portal.acm.org/citation.cfm?id=1290327$\#$}, }
@conference{1326260, title = {Skype4Games}, author = {Triebel, Tonio and Guthier, Benjamin and Effelsberg, Wolfgang}, booktitle = {NetGames '07: Proceedings of the 6th ACM SIGCOMM workshop on Network and system support for games}, organization = {ACM}, year = {2007}, address = {New York, NY, USA}, pages = {13--18}, publisher = {ACM}, abstract = {We propose to take advantage of the distributed multi-user Skype system for the implementation of an interactive online game. Skype combines efficient multi-peer support with the ability to get around firewalls and network address translation; in addition, speech is available to all game participants for free. We discuss the network requirements of interactive multi-player games, in particular concerning end-to-end delay and distributed state maintenance. We then introduce the multi-user support available in Skype and conclude that it should suffice for a game implementation. We explain how our multi-player game based on the Irrlicht graphics engine was implemented over Skype, and we present very promising results of an early performance evaluation}, www_section = {distributed interactive applications, P2P}, isbn = {978-0-9804460-0-5}, doi = {10.1145/1326257.1326260}, url = {http://portal.acm.org/citation.cfm?id=1326260$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Triebel2007a.pdf}, }
@article{1327188, title = {Private Searching on Streaming Data}, author = {Rafail Ostrovsky and William E. Skeith}, journal = {J. Cryptol}, volume = {20}, number = {4}, year = {2007}, address = {Secaucus, NJ, USA}, pages = {397--430}, publisher = {Springer-Verlag New York, Inc}, abstract = {In this paper we consider the problem of private searching on streaming data, where we can efficiently implement searching for documents that satisfy a secret criteria (such as the presence or absence of a hidden combination of hidden keywords) under various cryptographic assumptions. Our results can be viewed in a variety of ways: as a generalization of the notion of private information retrieval (to more general queries and to a streaming environment); as positive results on privacy-preserving datamining; and as a delegation of hidden program computation to other machines}, www_section = {keywords, privacy, private information retrieval, search, streaming}, issn = {0933-2790}, doi = {10.1007/s00145-007-0565-3}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ostrovsky-Skeith.pdf}, }
@phdthesis{1329865, title = {Cheat-proof event ordering for large-scale distributed multiplayer games}, author = {GauthierDickey, Chris}, school = {University of Oregon}, year = {2007}, address = {Eugene, OR, USA}, note = {Adviser-Lo, Virginia}, abstract = {Real-time, interactive, multi-user (RIM) applications are networked applications that allow users to collaborate and interact with each other over the Internet for work, education and training, or entertainment purposes. Multiplayer games, distance learning applications, collaborative whiteboards, immersive educational and training simulations, and distributed interactive simulations are examples of these applications. Of these RIM applications, multiplayer games are an important class for research due to their widespread deployment and popularity on the Internet. Research with multiplayer games will have a direct impact on all RIM applications. While large-scale multiplayer games have typically used a client/server architecture for network communication, we propose using a peer-to-peer architecture to solve the scalability problems inherent in centralized systems. Past research and actual deployments of peer-to-peer networks show that they can scale to millions of users. However, these prior peer-to-peer networks do not meet the low latency and interactive requirements that multi-player games need. Indeed, the fundamental problem of maintaining consistency between all nodes in the face of failures, delays, and malicious attacks has to be solved to make a peer-to-peer networks a viable solution. We propose solving the consistency problem through secure and scalable event ordering. While traditional event ordering requires all-to-all message passing and at least two rounds of communication, we argue that multiplayer games lend themselves naturally to a hierarchical decomposition of their state space so that we can reduce the communication cost of event ordering. We also argue that by using cryptography, a discrete view of time, and majority voting, we can totally order events in a real-time setting. By applying these two concepts, we can scale multiplayer games to millions of players. We develop our solution in two parts: a cheat-proof and real-time event ordering protocol and a scalable, hierarchical structure that organizes peers in a tree according to their scope of interest in the game. Our work represents the first, complete solution to this problem and we show through both proofs and simulations that our protocols allow the creation of large-scale, peer-to-peer games that are resistant to cheating while maintaining real-time responsiveness in the system}, url = {http://portal.acm.org/citation.cfm?id=1329865}, internal-note = {TODO: missing www_section field}, }
@article{1341892, title = {ODSBR: An on-demand secure Byzantine resilient routing protocol for wireless ad hoc networks}, author = {Awerbuch, Baruch and Curtmola, Reza and Holmer, David and Nita-Rotaru, Cristina and Rubens, Herbert}, journal = {ACM Trans. Inf. Syst. Secur}, volume = {10}, number = {4}, year = {2008}, address = {New York, NY, USA}, pages = {1--35}, publisher = {ACM}, abstract = {Ah hoc networks offer increased coverage by using multihop communication. This architecture makes services more vulnerable to internal attacks coming from compromised nodes that behave arbitrarily to disrupt the network, also referred to as Byzantine attacks. In this work, we examine the impact of several Byzantine attacks performed by individual or colluding attackers. We propose ODSBR, the first on-demand routing protocol for ad hoc wireless networks that provides resilience to Byzantine attacks caused by individual or colluding nodes. The protocol uses an adaptive probing technique that detects a malicious link after log n faults have occurred, where n is the length of the path. Problematic links are avoided by using a route discovery mechanism that relies on a new metric that captures adversarial behavior. Our protocol never partitions the network and bounds the amount of damage caused by attackers. We demonstrate through simulations ODSBR's effectiveness in mitigating Byzantine attacks. Our analysis of the impact of these attacks versus the adversary's effort gives insights into their relative strengths, their interaction, and their importance when designing multihop wireless routing protocols}, www_section = {ad-hoc networks, byzantine fault tolerance, on-demand routing, security model}, issn = {1094-9224}, doi = {10.1145/1284680.1341892}, url = {http://portal.acm.org/citation.cfm?id=1284680.1341892}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ODSBR-TISSEC.pdf}, }
@inproceedings{1345798, title = {Dependability Evaluation of Cooperative Backup Strategies for Mobile Devices}, author = {Court{\`e}s, Ludovic and Hamouda, Ossama and Kaaniche, Mohamed and Killijian, Marc-Olivier and Powell, David}, booktitle = {PRDC '07: Proceedings of the 13th Pacific Rim International Symposium on Dependable Computing}, organization = {IEEE Computer Society}, year = {2007}, address = {Washington, DC, USA}, pages = {139--146}, publisher = {IEEE Computer Society}, abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. This paper discusses the dependability evaluation of a cooperative backup service for mobile devices. Participating devices leverage encounters with other devices to temporarily replicate critical data. Permanent backups are created when the participating devices are able to access the fixed infrastructure. Several data replication and scattering strategies are presented,including the use of erasure codes. A methodology to model and evaluate them using Petri nets and Markov chains is described. We demonstrate that our cooperative backup service decreases the probability of data loss by a factor up to the ad hoc to Internet connectivity ratio}, isbn = {0-7695-3054-0}, doi = {10.1109/PRDC.2007.29}, url = {http://portal.acm.org/citation.cfm?id=1345534.1345798}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.8269_0.pdf}, internal-note = {TODO: missing www_section field}, }
@article{1358311, title = {Linyphi: creating IPv6 mesh networks with SSR}, author = {Di, Pengfei and Eickhold, Johannes and Fuhrmann, Thomas}, journal = {Concurr. Comput. : Pract. Exper}, volume = {20}, number = {6}, year = {2008}, address = {Chichester, UK}, pages = {675--691}, publisher = {John Wiley and Sons Ltd}, abstract = {Scalable source routing (SSR) is a self-organizing routing protocol which is especially suited for networks that do not have a well-crafted structure, e.g. ad hoc and mesh networks. SSR works on a flat identifier space. As a consequence, it can easily support host mobility without requiring any location directory or other centralized service. SSR is based on a virtual ring structure, which is used in a chord-like manner to obtain source routes to previously unknown destinations. It has been shown that SSR requires very little per node state and produces very little control messages. In particular, SSR has been found to outperform other ad hoc routing protocols such as ad hoc on-demand distance vector routing, optimized link-state routing, or beacon vector routing. In this paper we present Linyphi, an implementation of SSR for wireless access routers. Linyphi combines IPv6 and SSR so that unmodified IPv6 hosts have transparent connectivity to both the Linyphi mesh network and the IPv4-v6 Internet. We give a basic outline of the implementation and demonstrate its suitability in real-world mesh network scenarios. Furthermore, we illustrate the use of Linyphi for distributed applications such as the Linyphone peer-to-peer VoIP application. Copyright {\textcopyright} 2008 John Wiley \& Sons, Ltd}, www_section = {scalable source routing}, issn = {1532-0626}, doi = {10.1002/cpe.v20:6}, url = {http://portal.acm.org/citation.cfm?id=1358302.1358311}, }
@inproceedings{1361410, title = {Purely functional system configuration management}, author = {Dolstra, Eelco and Hemel, Armijn}, booktitle = {HOTOS'07: Proceedings of the 11th USENIX workshop on Hot topics in operating systems}, organization = {USENIX Association}, year = {2007}, address = {Berkeley, CA, USA}, pages = {1--6}, publisher = {USENIX Association}, abstract = {System configuration management is difficult because systems evolve in an undisciplined way: packages are upgraded, configuration files are edited, and so on. The management of existing operating systems is strongly imperative in nature, since software packages and configuration data (e.g., /bin and /etc in Unix) can be seen as imperative data structures: they are updated in-place by system administration actions. In this paper we present an alternative approach to system configuration management: a purely functional method, analogous to languages like Haskell. In this approach, the static parts of a configuration -- software packages, configuration files, control scripts -- are built from pure functions, i.e., the results depend solely on the specified inputs of the function and are immutable. As a result, realising a system configuration becomes deterministic and reproducible. Upgrading to a new configuration is mostly atomic and doesn't overwrite anything of the old configuration, thus enabling rollbacks. We have implemented the purely functional model in a small but realistic Linux-based operating system distribution called NixOS}, url = {http://portal.acm.org/citation.cfm?id=1361410}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dolstra.pdf}, internal-note = {TODO: missing www_section field}, }
@article{1373458, title = {Efficient routing in intermittently connected mobile networks: the single-copy case}, author = {Spyropoulos, Thrasyvoulos and Psounis, Konstantinos and Raghavendra, Cauligi S.}, journal = {IEEE/ACM Trans. Netw}, volume = {16}, number = {1}, year = {2008}, address = {Piscataway, NJ, USA}, pages = {63--76}, publisher = {IEEE Press}, abstract = {Intermittently connected mobile networks are wireless networks where most of the time there does not exist a complete path from the source to the destination. There are many real networks that follow this model, for example, wildlife tracking sensor networks, military networks, vehicular ad hoc networks (VANETs), etc. In this context, conventional routing schemes would fail, because they try to establish complete end-to-end paths, before any data is sent. To deal with such networks researchers have suggested to use flooding-based routing schemes. While flooding-based schemes have a high probability of delivery, they waste a lot of energy and suffer from severe contention which can significantly degrade their performance. With this in mind, we look into a number of "single-copy" routing schemes that use only one copy per message, and hence significantly reduce the resource requirements of flooding-based algorithms. We perform a detailed exploration of the single-copy routing space in order to identify efficient single-copy solutions that (i) can be employed when low resource usage is critical, and (ii) can help improve the design of general routing schemes that use multiple copies. We also propose a theoretical framework that we use to analyze the performance of all single-copy schemes presented, and to derive upper and lower bounds on the delay of any scheme}, www_section = {mobile Ad-hoc networks, routing}, issn = {1063-6692}, doi = {10.1109/TNET.2007.897962}, url = {http://portal.acm.org/citation.cfm?id=1373458}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.74.8097.pdf}, }
@article{1373992, title = {Characterizing unstructured overlay topologies in modern P2P file-sharing systems}, author = {Stutzbach, Daniel and Rejaie, Reza and Sen, Subhabrata}, journal = {IEEE/ACM Trans. Netw}, volume = {16}, number = {2}, year = {2008}, address = {Piscataway, NJ, USA}, pages = {267--280}, publisher = {IEEE Press}, abstract = {In recent years, peer-to-peer (P2P) file-sharing systems have evolved to accommodate growing numbers of participating peers. In particular, new features have changed the properties of the unstructured overlay topologies formed by these peers. Little is known about the characteristics of these topologies and their dynamics in modern file-sharing applications, despite their importance. This paper presents a detailed characterization of P2P overlay topologies and their dynamics, focusing on the modern Gnutella network. We present Cruiser, a fast and accurate P2P crawler, which can capture a complete snapshot of the Gnutella network of more than one million peers in just a few minutes, and show how inaccuracy in snapshots can lead to erroneous conclusions--such as a power-law degree distribution. Leveraging recent overlay snapshots captured with Cruiser, we characterize the graph-related properties of individual overlay snapshots and overlay dynamics across slices of back-to-back snapshots. Our results reveal that while the Gnutella network has dramatically grown and changed in many ways, it still exhibits the clustering and short path lengths of a small world network. Furthermore, its overlay topology is highly resilient to random peer departure and even systematic attacks. More interestingly, overlay dynamics lead to an "onion-like" biased connectivity among peers where each peer is more likely connected to peers with higher uptime. Therefore, long-lived peers form a stable core that ensures reachability among peers despite overlay dynamics}, www_section = {file-sharing, P2P}, issn = {1063-6692}, doi = {10.1109/TNET.2007.900406}, url = {http://portal.acm.org/citation.cfm?id=1373992}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/stutzbach.pdf}, }
@inproceedings{1387603, title = {BFT protocols under fire}, author = {Singh, Atul and Das, Tathagata and Maniatis, Petros and Druschel, Peter and Roscoe, Timothy}, booktitle = {NSDI'08: Proceedings of the 5th USENIX Symposium on Networked Systems Design and Implementation}, organization = {USENIX Association}, year = {2008}, address = {Berkeley, CA, USA}, pages = {189--204}, publisher = {USENIX Association}, abstract = {Much recent work on Byzantine state machine replication focuses on protocols with improved performance under benign conditions (LANs, homogeneous replicas, limited crash faults), with relatively little evaluation under typical, practical conditions (WAN delays, packet loss, transient disconnection, shared resources). This makes it difficult for system designers to choose the appropriate protocol for a real target deployment. Moreover, most protocol implementations differ in their choice of runtime environment, crypto library, and transport, hindering direct protocol comparisons even under similar conditions. We present a simulation environment for such protocols that combines a declarative networking system with a robust network simulator. Protocols can be rapidly implemented from pseudocode in the high-level declarative language of the former, while network conditions and (measured) costs of communication packages and crypto primitives can be plugged into the latter. We show that the resulting simulator faithfully predicts the performance of native protocol implementations, both as published and as measured in our local network. We use the simulator to compare representative protocols under identical conditions and rapidly explore the effects of changes in the costs of crypto operations, workloads, network conditions and faults. For example, we show that Zyzzyva outperforms protocols like PBFT and Q/U undermost but not all conditions, indicating that one-size-fits-all protocols may be hard if not impossible to design in practice}, isbn = {111-999-5555-22-1}, url = {http://portal.acm.org/citation.cfm?id=1387603}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BFTSim-nsdi08.pdf}, internal-note = {TODO: missing www_section field; verify isbn (looks like a placeholder)}, }
@article{1390683, title = {Linear-Time Computation of Similarity Measures for Sequential Data}, author = {Rieck, Konrad and Laskov, Pavel}, journal = {J. Mach. Learn. Res}, volume = {9}, year = {2008}, pages = {23--48}, publisher = {JMLR.org}, abstract = {Efficient and expressive comparison of sequences is an essential procedure for learning with sequential data. In this article we propose a generic framework for computation of similarity measures for sequences, covering various kernel, distance and non-metric similarity functions. The basis for comparison is embedding of sequences using a formal language, such as a set of natural words, k-grams or all contiguous subsequences. As realizations of the framework we provide linear-time algorithms of different complexity and capabilities using sorted arrays, tries and suffix trees as underlying data structures. Experiments on data sets from bioinformatics, text processing and computer security illustrate the efficiency of the proposed algorithms---enabling peak performances of up to 106 pairwise comparisons per second. The utility of distances and non-metric similarity measures for sequences as alternatives to string kernels is demonstrated in applications of text categorization, network intrusion detection and transcription site recognition in DNA}, issn = {1532-4435}, url = {http://portal.acm.org/citation.cfm?id=1390683}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jmlr08.pdf}, internal-note = {TODO: missing www_section field}, }
@inproceedings{1396915, title = {S/Kademlia: A practicable approach towards secure key-based routing}, author = {Baumgart, Ingmar and Mies, Sebastian}, booktitle = {ICPADS '07: Proceedings of the 13th International Conference on Parallel and Distributed Systems}, organization = {IEEE Computer Society}, year = {2007}, address = {Washington, DC, USA}, pages = {1--8}, publisher = {IEEE Computer Society}, abstract = {Security is a common problem in completely decentralized peer-to-peer systems. Although several suggestions exist on how to create a secure key-based routing protocol, a practicable approach is still unattended. In this paper we introduce a secure key-based routing protocol based on Kademlia that has a high resilience against common attacks by using parallel lookups over multiple disjoint paths, limiting free nodeId generation with crypto puzzles and introducing a reliable sibling broadcast. The latter is needed to store data in a safe replicated way. We evaluate the security of our proposed extensions to the Kademlia protocol analytically and simulate the effects of multiple disjoint paths on lookup success under the influence of adversarial nodes}, isbn = {978-1-4244-1889-3}, doi = {10.1109/ICPADS.2007.4447808}, url = {http://portal.acm.org/citation.cfm?id=1396915}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SKademlia2007.pdf}, internal-note = {TODO: missing www_section field}, }
@inproceedings{1424615, title = {Trust-Rated Authentication for Domain-Structured Distributed Systems}, author = {Holz, Ralph and Niedermayer, Heiko and Hauck, Peter and Carle, Georg}, booktitle = {EuroPKI '08: Proceedings of the 5th European PKI workshop on Public Key Infrastructure}, organization = {Springer-Verlag}, year = {2008}, address = {Berlin, Heidelberg}, pages = {74--88}, publisher = {Springer-Verlag}, abstract = {We present an authentication scheme and new protocol for domain-based scenarios with inter-domain authentication. Our protocol is primarily intended for domain-structured Peer-to-Peer systems but is applicable for any domain scenario where clients from different domains wish to authenticate to each other. To this end, we make use of Trusted Third Parties in the form of Domain Authentication Servers in each domain. These act on behalf of their clients, resulting in a four-party protocol. If there is a secure channel between the Domain Authentication Servers, our protocol can provide secure authentication. To address the case where domains do not have a secure channel between them, we extend our scheme with the concept of trust-rating. Domain Authentication Servers signal security-relevant information to their clients (pre-existing secure channel or not, trust, ...). The clients evaluate this information to decide if it fits the security requirements of their application}, www_section = {authentication, distributed systems, P2P, PKI, trust}, isbn = {978-3-540-69484-7}, doi = {10.1007/978-3-540-69485-4}, url = {http://www.springerlink.com/content/k6786282r5378k42/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AuthenticationEuroPKI2008.pdf}, }
@inproceedings{1456474, title = {Tahoe: the least-authority filesystem}, author = {Wilcox-O'Hearn, Zooko and Warner, Brian}, booktitle = {StorageSS '08: Proceedings of the 4th ACM international workshop on Storage security and survivability}, organization = {ACM}, year = {2008}, address = {New York, NY, USA}, pages = {21--26}, publisher = {ACM}, abstract = {Tahoe is a system for secure, distributed storage. It uses capabilities for access control, cryptography for confidentiality and integrity, and erasure coding for fault-tolerance. It has been deployed in a commercial backup service and is currently operational. The implementation is Open Source}, www_section = {capabilities, fault-tolerance, P2P}, isbn = {978-1-60558-299-3}, doi = {10.1145/1456469.1456474}, url = {http://portal.acm.org/citation.cfm?id=1456474}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lafs.pdf}, }
@article{1461118, title = {Shortest-path routing in randomized DHT-based Peer-to-Peer systems}, author = {Wang, Chih-Chiang and Harfoush, Khaled}, journal = {Comput. Netw}, volume = {52}, number = {18}, year = {2008}, address = {New York, NY, USA}, pages = {3307--3317}, publisher = {Elsevier North-Holland, Inc}, abstract = {Randomized DHT-based Peer-to-Peer (P2P) systems grant nodes certain flexibility in selecting their overlay neighbors, leading to irregular overlay structures but to better overall performance in terms of path latency, static resilience and local convergence. However, routing in the presence of overlay irregularity is challenging. In this paper, we propose a novel routing protocol, RASTER, that approximates shortest overlay routes between nodes in randomized DHTs. Unlike previously proposed routing protocols, RASTER encodes and aggregates routing information. Its simple bitmap-encoding scheme together with the proposed RASTER routing algorithm enable a performance edge over current overlay routing protocols. RASTER provides a forwarding overhead of merely a small constant number of bitwise operations, a routing performance close to optimal, and a better resilience to churn. RASTER also provides nodes with the flexibility to adjust the size of the maintained routing information based on their storage/processing capabilities. The cost of storing and exchanging encoded routing information is manageable and grows logarithmically with the number of nodes in the system}, www_section = {distributed hash table, P2P, routing}, issn = {1389-1286}, doi = {10.1016/j.comnet.2008.07.014}, url = {http://portal.acm.org/citation.cfm?id=1461118}, }
@article{15043, title = {Revised report on the algorithmic language {Scheme}}, author = {Rees, Jonathan and Clinger, William and Kelsey, Richard}, journal = {SIGPLAN Not}, volume = {21}, number = {12}, year = {1986}, address = {New York, NY, USA}, pages = {37--79}, publisher = {ACM}, abstract = {The report gives a defining description of the programming language Scheme. Scheme is a statically scoped and properly tail-recursive dialect of the Lisp programming language invented by Guy Lewis Steele Jr. and Gerald Jay Sussman. It was designed to have an exceptionally clear and simple semantics and few different ways to form expressions. A wide variety of programming paradigms, including imperative, functional, and message passing styles, find convenient expression in Scheme. The introduction offers a brief history of the language and of the report. The first three chapters present the fundamental ideas of the language and describe the notational conventions used for describing the language and for writing programs in the language}, issn = {0362-1340}, doi = {10.1145/15042.15043}, url = {http://en.scientificcommons.org/42347723}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/r5rs.pdf}, www_section = {Unsorted}, }
@inproceedings{1524297, title = {Query Forwarding Algorithm Supporting Initiator Anonymity in GNUnet}, author = {Tatara, Kohei and Hori, Yoshiaki and Sakurai, Kouichi}, booktitle = {Parallel and Distributed Systems, 2005. Proceedings. 11th International Conference on}, volume = {2}, year = {2005}, month = jul, pages = {235--239}, abstract = {Anonymity in peer-to-peer network means that it is difficult to associate a particular communication with a sender or a recipient. Recently, anonymous peer-to-peer framework, called GNUnet, was developed. A primary feature of GNUnet is resistance to traffic-analysis. However, Kugler analyzed a routing protocol in GNUnet, and pointed out traceability of initiator. In this paper, we propose an alternative routing protocol applicable in GNUnet, which is resistant to Kugler's shortcut attacks}, www_section = {anonymity, GNUnet, routing, shortcut}, issn = {1521-9097}, doi = {10.1109/ICPADS.2005.246}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kugler2.pdf}, }
@inproceedings{1551621, title = {Maintaining reference graphs of globally accessible objects in fully decentralized distributed systems}, author = {Saballus, Bjoern and Fuhrmann, Thomas}, booktitle = {HPDC '09: Proceedings of the 18th ACM international symposium on High performance distributed computing}, organization = {ACM}, year = {2009}, address = {New York, NY, USA}, pages = {59--60}, publisher = {ACM}, abstract = {Since the advent of electronic computing, the processors' clock speed has risen tremendously. Now that energy efficiency requirements have stopped that trend, the number of processing cores per machine started to rise. In near future, these cores will become more specialized, and their inter-connections will form complex networks, both on-chip and beyond. This trend opens new fields of applications for high performance computing: Heterogeneous architectures offer different functionalities and thus support a wider range of applications. The increased compute power of these systems allows more complex simulations and numerical computations. Falling costs enable even small companies to invest in multi-core systems and clusters. However, the growing complexity might impede this growth. Imagine a cluster of thousands of interconnected heterogeneous processor cores. A software developer will need a deep knowledge about the underlying infrastructure as well as the data and communication dependencies in her application to partition it optimally across the available cores. Moreover, a predetermined partitioning scheme cannot reflect failing processors or additionally provided resources. In our poster, we introduce J-Cell, a project that aims at simplifying high performance distributed computing. J-Cell offers a single system image, which allows applications to run transparently on heterogeneous multi-core machines. It distributes code, objects and threads onto the compute resources which may be added or removed at run-time. This dynamic property leads to an ad-hoc network of processors and cores. In this network, a fully decentralized object localization and retrieval algorithm guarantees the access to distributed shared objects}, www_section = {globally accessible objects, single system image}, isbn = {978-1-60558-587-1}, doi = {10.1145/1551609.1551621}, url = {http://portal.acm.org/citation.cfm?id=1551609.1551621}, }
@inproceedings{1582481, title = {Using link-layer broadcast to improve scalable source routing}, author = {Di, Pengfei and Fuhrmann, Thomas}, booktitle = {IWCMC '09: Proceedings of the 2009 International Conference on Wireless Communications and Mobile Computing}, organization = {ACM}, year = {2009}, month = jan, address = {New York, NY, USA}, pages = {466--471}, publisher = {ACM}, abstract = {Scalable source routing (SSR) is a network layer routing protocol that provides services that are similar to those of structured peer-to-peer overlays. In this paper, we describe several improvements to the SSR protocol. They aim at providing nodes with more up-to-date routing information: 1. The use of link-layer broadcast enables all neighbors of a node to contribute to the forwarding process. 2. A light-weight and fast selection mechanism avoids packet duplication and optimizes the source route iteratively. 3. Nodes implicitly learn the network's topology from overheard broadcast messages. We present simulation results which show the performance gain of the proposed improvements: 1. The delivery ratio in settings with high mobility increases. 2. The required per-node state can be reduced as compared with the original SSR protocol. 3. The route stretch decreases. --- These improvements are achieved without increasing the routing overhead}, www_section = {mobile Ad-hoc networks, P2P, routing, scalable source routing}, isbn = {978-1-60558-569-7}, doi = {10.1145/1582379.1582481}, url = {http://portal.acm.org/citation.cfm?id=1582481}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09broadcastssr.pdf}, }
@inproceedings{1590633, title = {Wireless Sensor Networks: A Survey}, author = {Potdar, Vidyasagar and Sharif, Atif and Chang, Elizabeth}, booktitle = {WAINA '09: Proceedings of the 2009 International Conference on Advanced Information Networking and Applications Workshops}, organization = {IEEE Computer Society}, year = {2009}, address = {Washington, DC, USA}, pages = {636--641}, publisher = {IEEE Computer Society}, abstract = {Wireless Sensor Networks (WSN), an element of pervasive computing, are presently being used on a large scale to monitor real-time environmental status. However these sensors operate under extreme energy constraints and are designed by keeping an application in mind. Designing a new wireless sensor node is extremely challenging task and involves assessing a number of different parameters required by the target application, which includes range, antenna type, target technology, components, memory, storage, power, life time, security, computational capability, communication technology, power, size, programming interface and applications. This paper analyses commercially (and research prototypes) available wireless sensor nodes based on these parameters and outlines research directions in this area}, www_section = {FPGA, wireless sensor network}, isbn = {978-0-7695-3639-2}, doi = {10.1109/WAINA.2009.192}, url = {http://portal.acm.org/citation.cfm?id=1588304.1590633}, }
@article{1646697, title = {Improving delivery ratios for application layer multicast in mobile ad hoc networks}, author = {Baumung, Peter and Zitterbart, Martina and Kutzner, Kendy}, journal = {Comput. Commun}, volume = {28}, number = {14}, year = {2005}, address = {Newton, MA, USA}, pages = {1669--1679}, publisher = {Butterworth-Heinemann}, abstract = {Delivering multicast data using application layer approaches offers different advantages, as group members communicate using so-called overlay networks. These consist of a multicast group's members connected by unicast tunnels. Since existing approaches for application layer delivery of multicast data in mobile ad hoc networks (short MANETs) only deal with routing but not with error recovery, this paper evaluates tailored mechanisms for handling packet losses and congested networks. Although illustrated at the example of a specific protocol, the mechanisms may be applied to arbitrary overlays. This paper also investigates how application layer functionality based on overlay networks can turn existing multicast routing protocols (like ODMRP, M-AODV,...) into (almost) reliable transport protocols}, www_section = {mobile Ad-hoc networks, multicast, reliability}, issn = {0140-3664}, doi = {10.1016/j.comcom.2005.02.008}, url = {http://portal.acm.org/citation.cfm?id=1646697}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.68.5832.pdf}, }
@inproceedings{1656984, title = {Heterogeneous gossip}, author = {Frey, Davide and Guerraoui, Rachid and Kermarrec, Anne-Marie and Koldehofe, Boris and Mogensen, Martin and Monod, Maxime and Qu{\'e}ma, Vivien}, booktitle = {Middleware '09: Proceedings of the 10th ACM/IFIP/USENIX International Conference on Middleware}, organization = {Springer-Verlag New York, Inc}, year = {2009}, address = {New York, NY, USA}, pages = {1--20}, publisher = {Springer-Verlag New York, Inc}, abstract = {Gossip-based information dissemination protocols are considered easy to deploy, scalable and resilient to network dynamics. Load-balancing is inherent in these protocols as the dissemination work is evenly spread among all nodes. Yet, large-scale distributed systems are usually heterogeneous with respect to network capabilities such as bandwidth. In practice, a blind load-balancing strategy might significantly hamper the performance of the gossip dissemination. This paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynamically adapt their contribution to the gossip dissemination according to their bandwidth capabilities. Using a continuous, itself gossip-based, approximation of relative bandwidth capabilities, HEAP dynamically leverages the most capable nodes by increasing their fanout, while decreasing by the same proportion that of less capable nodes. HEAP preserves the simple and proactive (churn adaptation) nature of gossip, while significantly improving its effectiveness. We extensively evaluate HEAP in the context of a video streaming application on a testbed of 270 PlanetLab nodes. Our results show that HEAP significantly improves the quality of the streaming over standard homogeneous gossip protocols, especially when the stream rate is close to the average available bandwidth}, www_section = {heterogeneity, load balancing}, url = {http://portal.acm.org/citation.cfm?id=1656984}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/middleware-monod.pdf}, }
@inproceedings{1658999, title = {Scalable landmark flooding: a scalable routing protocol for WSNs}, author = {Di, Pengfei and Fuhrmann, Thomas}, booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student workshop on Emerging networking experiments and technologies}, organization = {ACM}, year = {2009}, address = {New York, NY, USA}, pages = {1--2}, publisher = {ACM}, abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive tool for all kinds of applications. More advanced applications also need end-to-end routing, which goes beyond the simple data dissemination and collection mechanisms of early WSNs. The special properties of WSNs -- scarce memory, CPU, and energy resources -- make this a challenge. The Dynamic Address Routing protocol (DART) could be a good candidate for WSN routing, if it were not so prone to link outages. In this paper, we propose Scalable Landmark Flooding (SLF), a new routing protocol for large WSNs. It combines ideas from landmark routing, flooding, and dynamic address routing. SLF is robust against link and node outages, requires only little routing state, and generates low maintenance traffic overhead}, www_section = {wireless sensor network}, isbn = {978-1-60558-751-6}, doi = {10.1145/1658997.1658999}, url = {http://portal.acm.org/citation.cfm?id=1658997.1658999}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09slf.pdf}, }
@inproceedings{1659021, title = {Bloom filters and overlays for routing in pocket switched networks}, author = {Mayer, Christoph P.}, booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student workshop on Emerging networking experiments and technologies}, organization = {ACM}, year = {2009}, address = {New York, NY, USA}, pages = {43--44}, publisher = {ACM}, abstract = {Pocket Switched Networks (PSN) [3] have become a promising approach for providing communication between scarcely connected human-carried devices. Such devices, e.g. mobile phones or sensor nodes, are exposed to human mobility and can therewith leverage inter-human contacts for store-and-forward routing. Efficiently routing in such delay tolerant networks is complex due to incomplete knowledge about the network, and high dynamics of the network. In this work we want to develop an extension of Bloom filters for resource-efficient routing in pocket switched networks. Furthermore, we argue that PSNs may become densely populated in special situations. We want to exploit such situations to perform collaborative calculations of forwarding-decisions. In this paper we present a simple scheme for distributed decision calculation using overlays and a DHT-based distributed variant of Bloom filters}, www_section = {Bloom filter, overlay networks, pocket switched network}, isbn = {978-1-60558-751-6}, doi = {10.1145/1658997.1659021}, url = {http://portal.acm.org/citation.cfm?doid=1658997.1659021}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/conext09-phdworkshop-cameraready.pdf}, }
@article{1667071, title = {Privacy-preserving similarity-based text retrieval}, author = {Pang, Hweehwa and Shen, Jialie and Krishnan, Ramayya}, journal = {ACM Transactions on Internet Technology}, volume = {10}, number = {1}, year = {2010}, address = {New York, NY, USA}, pages = {1--39}, publisher = {ACM}, abstract = {Users of online services are increasingly wary that their activities could disclose confidential information on their business or personal activities. It would be desirable for an online document service to perform text retrieval for users, while protecting the privacy of their activities. In this article, we introduce a privacy-preserving, similarity-based text retrieval scheme that (a) prevents the server from accurately reconstructing the term composition of queries and documents, and (b) anonymizes the search results from unauthorized observers. At the same time, our scheme preserves the relevance-ranking of the search server, and enables accounting of the number of documents that each user opens. The effectiveness of the scheme is verified empirically with two real text corpora}, www_section = {keywords, privacy, search, text mining}, issn = {1533-5399}, doi = {10.1145/1667067.1667071}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/privacy_preserving_similarity.pdf}, url = {https://bibliography.gnunet.org}, }
@article{1672334, title = {Reconnecting the internet with ariba: self-organizing provisioning of end-to-end connectivity in heterogeneous networks}, author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Mies, Sebastian and Bless, Roland and Waldhorst, Oliver and Zitterbart, Martina}, journal = {SIGCOMM Computer Communication Review}, volume = {40}, number = {1}, year = {2010}, address = {New York, NY, USA}, pages = {131--132}, publisher = {ACM}, abstract = {End-to-End connectivity in today's Internet can no longer be taken for granted. Middleboxes, mobility, and protocol heterogeneity complicate application development and often result in application-specific solutions. In our demo we present ariba: an overlay-based approach to handle such network challenges and to provide consistent homogeneous network primitives in order to ease application and service development}, www_section = {heterogeneity, overlay networks, P2P}, issn = {0146-4833}, doi = {10.1145/1672308.1672334}, url = {http://portal.acm.org/citation.cfm?doid=1672308.1672334}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p131-v40n1n-huebschA.pdf}, }
@booklet{1698181, title = {Routing with Byzantine robustness}, author = {Perlman, Radia}, year = {2005}, address = {Mountain View, CA, USA}, publisher = {Sun Microsystems, Inc}, abstract = {This paper describes how a network can continue to function in the presence of Byzantine failures. A Byzantine failure is one in which a node, instead of halting (as it would in a fail-stop failure), continues to operate, but incorrectly. It might lie about routing information, perform the routing algorithm itself flawlessly, but then fail to forward some class of packets correctly, or flood the network with garbage traffic. Our goal is to design a network so that as long as one nonfaulty path connects nonfaulty nodes A and B, they will be able to communicate, with some fair share of bandwidth, even if all the other components in the network are maximally malicious. We review work from 1988 that presented a network design that had that property, but required the network to be small enough so that every router could keep state proportional to n2, where n is the total number of nodes in the network. This would work for a network of size on the order of a thousand nodes, but to build a large network, we need to introduce hierarchy. This paper presents a new design, building on the original work, that works with hierarchical networks. This design not only defends against malicious routers, but because it guarantees fair allocation of resources, can mitigate against many other types of denial of service attacks}, www_section = {routing}, url = {http://portal.acm.org/citation.cfm?id=1698181}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/smli_tr-2005-146.pdf}, }
@inproceedings{1759877, title = {CFR: a peer-to-peer collaborative file repository system}, author = {Lin, Meng-Ru and Lu, Ssu-Hsuan and Ho, Tsung-Hsuan and Lin, Peter and Chung, Yeh-Ching}, booktitle = {GPC'07: Proceedings of the 2nd international conference on Advances in grid and pervasive computing}, organization = {Springer-Verlag}, year = {2007}, address = {Berlin, Heidelberg}, pages = {100--111}, publisher = {Springer-Verlag}, abstract = {Due to the high availability of the Internet, many large cross-organization collaboration projects, such as SourceForge, grid systems etc., have emerged. One of the fundamental requirements of these collaboration efforts is a storage system to store and exchange data. This storage system must be highly scalable and can efficiently aggregate the storage resources contributed by the participating organizations to deliver good performance for users. In this paper, we propose a storage system, Collaborative File Repository (CFR), for large scale collaboration projects. CFR uses peer-to-peer techniques to achieve scalability, efficiency, and ease of management. In CFR, storage nodes contributed by the participating organizations are partitioned according to geographical regions. Files stored in CFR are automatically replicated to all regions. Furthermore, popular files are duplicated to other storage nodes of the same region. By doing so, data transfers between users and storage nodes are confined within their regions and transfer efficiency is enhanced. Experiments show that our replication can achieve high efficiency with a small number of duplicates}, www_section = {P2P, storage}, isbn = {978-3-540-72359-2}, url = {http://portal.acm.org/citation.cfm?id=1759877}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.7110.pdf}, }
@inproceedings{1827424, title = {Cordies: expressive event correlation in distributed systems}, author = {Gerald G. Koch and Boris Koldehofe and Kurt Rothermel}, booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems}, organization = {ACM}, year = {2010}, address = {New York, NY, USA}, pages = {26--37}, publisher = {ACM}, abstract = {Complex Event Processing (CEP) is the method of choice for the observation of system states and situations by means of events. A number of systems have been introduced that provide CEP in selected environments. Some are restricted to centralised systems, or to systems with synchronous communication, or to a limited space of event relations that are defined in advance. Many modern systems, though, are inherently distributed and asynchronous, and require a more powerful CEP. We present Cordies, a distributed system for the detection of correlated events that is designed for the operation in large-scale, heterogeneous networks and adapts dynamically to changing network conditions. With its expressive language to describe event relations, it is suitable for environments where neither the event space nor the situations of interest are predefined but are constantly adapted. In addition, Cordies supports Quality-of-Service (QoS) for communication in distributed event correlation detection}, www_section = {QoS}, isbn = {978-1-60558-927-5}, doi = {10.1145/1827418.1827424}, url = {http://portal.acm.org/citation.cfm?id=1827424}, }
@inproceedings{1827425, title = {Providing basic security mechanisms in broker-less publish/subscribe systems}, author = {Tariq, Muhammad Adnan and Koldehofe, Boris and Altaweel, Ala and Rothermel, Kurt}, booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on Distributed Event-Based Systems}, organization = {ACM}, year = {2010}, address = {New York, NY, USA}, pages = {38--49}, publisher = {ACM}, abstract = {The provisioning of basic security mechanisms such as authentication and confidentiality is highly challenging in a content-based publish/subscribe system. Authentication of publishers and subscribers is difficult to achieve due to the loose coupling of publishers and subscribers. Similarly, confidentiality of events and subscriptions conflicts with content-based routing. In particular, content-based approaches in broker-less environments do not address confidentiality at all. This paper presents a novel approach to provide confidentiality and authentication in a broker-less content-based publish-subscribe system. The authentication of publishers and subscribers as well as confidentiality of events is ensured, by adapting the pairing-based cryptography mechanisms, to the needs of a publish/subscribe system. Furthermore, an algorithm to cluster subscribers according to their subscriptions preserves a weak notion of subscription confidentiality. Our approach provides fine grained key management and the cost for encryption, decryption and routing is in the order of subscribed attributes. Moreover, the simulation results verify that supporting security is affordable with respect to the cost for overlay construction and event dissemination latencies, thus preserving scalability of the system}, www_section = {P2P, publish/subscribe}, isbn = {978-1-60558-927-5}, doi = {10.1145/1827418.1827425}, url = {http://portal.acm.org/citation.cfm?id=1827418.1827425}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DIP_2872.pdf}, }
@book{1944, title = {The Theory of Games and Economic Behavior}, author = {John von Neumann and Oskar Morgenstern}, year = {1944}, address = {Princeton, New Jersey, USA}, edition = {60th}, pages = {0--776}, publisher = {Princeton University Press}, www_section = {economic behavior, games, theory}, isbn = {978-0-691-13061-3}, url = {http://www.archive.org/details/theoryofgamesand030098mbp}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Neumann\%20\%26\%20Morgenstern\%20-\%20Theory\%20of\%20Games\%20and\%20Economic\%20Behavior.pdf}, }
@article{1950, title = {Equilibrium points in n-person games}, author = {John F. Nash Jr.}, journal = {PNAS. Proceedings of the National Academy of Sciences of the USA}, volume = {36}, year = {1950}, month = jan, pages = {48--49}, abstract = {One may define a concept of an n-person game in which each player has a finite set of pure strategies and in which a definite set of payments to the n players corresponds to each n-tuple of pure strategies, one strategy being taken for each player. For mixed strategies, which are probability distributions over the pure strategies, the pay-off functions are the expectations of the players, thus becoming polylinear forms}, www_section = {n-person game, strategy}, doi = {10.1073/pnas.36.1.48}, url = {https://bibliography.gnunet.org}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20-\%20Nash\%20-\%20Equilibrium\%20points\%20in\%20n-person\%20games.pdf}, }
@article{1959, title = {On Random Graphs {I}}, author = {Paul Erd{\H o}s and Alfr{\'e}d R{\'e}nyi}, journal = {Publicationes Mathematicae (Debrecen)}, volume = {6}, year = {1959}, month = jan, pages = {290--297}, www_section = {graphs, random, random graphs}, url = {https://bibliography.gnunet.org}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Erd\%C5\%91s\%20\%26\%20R\%C3\%A9nyi\%20-\%20On\%20Random\%20Graphs.pdf}, }
@article{1962, title = {Low-density parity-check codes}, author = {Robert G. Gallager}, journal = {Information Theory, IRE Transactions on}, volume = {8}, year = {1962}, pages = {21--28}, abstract = {A low-density parity-check code is a code specified by a parity-check matrix with the following properties: each column contains a small fixed number $j \geq 3$ of 1's and each row contains a small fixed number $k > j$ of 1's. The typical minimum distance of these codes increases linearly with block length for a fixed rate and fixed $j$. When used with maximum likelihood decoding on a sufficiently quiet binary-input symmetric channel, the typical probability of decoding error decreases exponentially with block length for a fixed rate and fixed $j$. A simple but nonoptimum decoding scheme operating directly from the channel a posteriori probabilities is described. Both the equipment complexity and the data-handling capacity in bits per second of this decoder increase approximately linearly with block length. For $j > 3$ and a sufficiently low rate, the probability of error using this decoder on a binary symmetric channel is shown to decrease at least exponentially with a root of the block length. Some experimental results show that the actual probability of decoding error is much smaller than this theoretical bound}, www_section = {coding theory, low-density parity-check}, issn = {0096-1000}, doi = {10.1109/TIT.1962.1057683}, url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1057683}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ldpc.pdf}, }
@article{1968, title = {The Tragedy of the Commons}, author = {Garrett Hardin}, journal = {Science}, volume = {162}, year = {1968}, pages = {1243--1248}, doi = {10.1126/science.162.3859.1243}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Science\%20-\%20Hardin\%20-\%20The\%20Tragedy\%20of\%20the\%20Commons.pdf}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@article{1970_0, title = {An Efficient Heuristic Procedure for Partitioning Graphs}, author = {Kernighan, Brian W. and Lin, S.}, journal = {The Bell System Technical Journal}, volume = {49}, year = {1970}, month = jan, pages = {291--307}, abstract = {We consider the problem of partitioning the nodes of a graph with costs on its edges into subsets of given sizes so as to minimize the sum of the costs on all edges cut. This problem arises in several physical situations- for example, in assigning the components of electronic circuits to circuit boards to minimize the number of connections between boards. This paper presents a heuristic method for partitioning arbitrary graphs which is both effective in finding optimal partitions, and fast enough to be practical in solving large problems}, www_section = {heuristic method, partitioning graphs}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kernighan\%20\%26\%20Lin\%20-\%20An\%20Efficient\%20Heuristic\%20Procedure\%20for\%20Partitioning\%20Graphs\%250A.pdf}, url = {https://bibliography.gnunet.org}, }
@article{1970_1, title = {The market for "lemons": Quality uncertainty and the market mechanism}, author = {George A. Akerlof}, journal = {The Quarterly Journal of Economics}, volume = {84}, year = {1970}, month = aug, pages = {488--500}, abstract = {I. Introduction, 488.--II. The model with automobiles as an example, 489.--III. Examples and applications, 492.--IV. Counteracting institutions, 499.--V. Conclusion, 500}, doi = {10.2307/1879431}, url = {http://www.jstor.org/stable/1879431}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/akerlof.pdf}, www_section = {Unsorted}, }
@article{1971, title = {The Evolution of Reciprocal Altruism}, author = {Robert L. Trivers}, journal = {The Quarterly Review of Biology}, volume = {46}, year = {1971}, month = mar, pages = {35--57}, abstract = {A model is presented to account for the natural selection of what is termed reciprocally altruistic behavior. The model shows how selection can operate against the cheater (non-reciprocator) in the system. Three instances of altruistic behavior are discussed, the evolution of which the model can explain: (1) behavior involved in cleaning symbioses; (2) warning cries in birds; and (3) human reciprocal altruism. Regarding human reciprocal altruism, it is shown that the details of the psychological system that regulates this altruism can be explained by the model. Specifically, friendship, dislike, moralistic aggression, gratitude, sympathy, trust, suspicion, trustworthiness, aspects of guilt, and some forms of dishonesty and hypocrisy can be explained as important adaptations to regulate the altruistic system. Each individual human is seen as possessing altruistic and cheating tendencies, the expression of which is sensitive to developmental variables that were selected to set the tendencies at a balance appropriate to the local social and ecological environment}, www_section = {behavior, evolution, reciprocal altruism}, doi = {10.1086/406755}, url = {http://www.jstor.org/pss/2822435}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trivers\%20-\%20The\%20evolution\%20of\%20reciprocal\%20altruism.pdf}, }
@article{1977, title = {Towards a methodology for statistical disclosure control}, author = {Dalenius, Tore}, journal = {Statistisk Tidskrift}, volume = {15}, year = {1977}, pages = {429--444}, www_section = {database_privacy differential_privacy stat}, url = {https://bibliography.gnunet.org}, }
@article{1979, title = {Compact Encodings of List Structure}, author = {Bobrow, Daniel G. and Clark, Douglas W.}, journal = {ACM Transactions on Programming Languages and Systems}, volume = {1}, number = {2}, year = {1979}, address = {New York, NY, USA}, pages = {266--286}, publisher = {ACM New York, NY, USA}, abstract = {List structures provide a general mechanism for representing easily changed structured data, but can introduce inefficiencies in the use of space when fields of uniform size are used to contain pointers to data and to link the structure. Empirically determined regularity can be exploited to provide more space-efficient encodings without losing the flexibility inherent in list structures. The basic scheme is to provide compact pointer fields big enough to accommodate most values that occur in them and to provide {\textquotedblleft}escape{\textquotedblright} mechanisms for exceptional cases. Several examples of encoding designs are presented and evaluated, including two designs currently used in Lisp machines. Alternative escape mechanisms are described, and various questions of cost and implementation are discussed. In order to extrapolate our results to larger systems than those measured, we propose a model for the generation of list pointers and we test the model against data from two programs. We show that according to our model, list structures with compact cdr fields will, as address space grows, continue to be compacted well with a fixed-width small field. Our conclusion is that with a microcodable processor, about a factor of two gain in space efficiency for list structure can be had for little or no cost in processing time}, doi = {10.1145/357073.357081}, url = {http://portal.acm.org/citation.cfm?id=357081}, www_section = {Unsorted}, }
@inproceedings{1982, title = {Protocols for Secure Computations}, author = {Yao, Andrew C.}, booktitle = {Proceedings of the 23rd Annual Symposium on Foundations of Computer Science}, organization = {IEEE Computer Society}, year = {1982}, address = {Washington, DC, USA}, pages = {160--164}, publisher = {IEEE Computer Society}, doi = {10.1109/SFCS.1982.88}, url = {http://dx.doi.org/10.1109/SFCS.1982.88}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ProtocolSecurecomputations1982Yao.pdf}, www_section = {Unsorted}, }
@inproceedings{1986, title = {Networks Without User Observability {\textemdash} Design Options}, author = {Pfitzmann, Andreas and Waidner, Michael}, booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT' 85}, volume = {219}, year = {1986}, pages = {245--253}, editor = {Pichler, Franz}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In usual communication networks, the network operator or an intruder could easily observe when, how much and with whom the users communicate (traffic analysis), even if the users employ end-to-end encryption. When ISDNs are used for almost everything, this becomes a severe threat. Therefore, we summarize basic concepts to keep the recipient and sender or at least their relationship unobservable, consider some possible implementations and necessary hierarchical extensions, and propose some suitable performance and reliability enhancements}, isbn = {978-3-540-16468-5}, doi = {10.1007/3-540-39805-8_29}, url = {http://dx.doi.org/10.1007/3-540-39805-8_29}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetworkWithoutUserObservabiliy1985Pfitzmann.pdf}, www_section = {Unsorted}, }
@inproceedings{1987, title = {How to Play ANY Mental Game or A Completeness Theorem for Protocols with Honest Majority}, author = {Goldreich, O. and Micali, S. and Wigderson, A.}, booktitle = {Proceedings of the Nineteenth Annual ACM Symposium on Theory of Computing}, organization = {ACM}, year = {1987}, address = {New York, NY, USA}, pages = {218--229}, publisher = {ACM}, abstract = {We present a polynomial-time algorithm that, given as a input the description of a game with incomplete information and any number of players, produces a protocol for playing the game that leaks no partial information, provided the majority of the players is honest. Our algorithm automatically solves all the multi-party protocol problems addressed in complexity-based cryptography during the last 10 years. It actually is a completeness theorem for the class of distributed protocols with honest majority. Such completeness theorem is optimal in the sense that, if the majority of the players is not honest, some protocol problems have no efficient solution [C]}, isbn = {0-89791-221-7}, doi = {10.1145/28395.28420}, url = {http://doi.acm.org/10.1145/28395.28420}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PlayMentalGame1987Goldreich.pdf}, www_section = {Unsorted}, }
@inproceedings{1988_0, title = {Completeness Theorems for Non-cryptographic Fault-tolerant Distributed Computation}, author = {Ben-Or, Michael and Goldwasser, Shafi and Wigderson, Avi}, booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing}, organization = {ACM}, year = {1988}, address = {New York, NY, USA}, pages = {1--10}, publisher = {ACM}, abstract = {Every function of n inputs can be efficiently computed by a complete network of n processors in such a way that: If no faults occur, no set of size t < n/2 of players gets any additional information (other than the function value), Even if Byzantine faults are allowed, no set of size t < n/3 can either disrupt the computation or get additional information. Furthermore, the above bounds on t are tight!}, isbn = {0-89791-264-0}, doi = {10.1145/62212.62213}, url = {http://doi.acm.org/10.1145/62212.62213}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CompletelenessTheorems1988Ben-Or.pdf}, www_section = {Unsorted}, }
@inproceedings{1988_1, title = {Founding Cryptography on Oblivious Transfer}, author = {Kilian, Joe}, booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing}, organization = {ACM}, year = {1988}, address = {New York, NY, USA}, pages = {20--31}, publisher = {ACM}, abstract = {Suppose your netmail is being erratically censored by Captain Yossarian. Whenever you send a message, he censors each bit of the message with probability 1/2, replacing each censored bit by some reserved character. Well versed in such concepts as redundancy, this is no real problem to you. The question is, can it actually be turned around and used to your advantage? We answer this question strongly in the affirmative. We show that this protocol, more commonly known as oblivious transfer, can be used to simulate a more sophisticated protocol, known as oblivious circuit evaluation([Y]). We also show that with such a communication channel, one can have completely noninteractive zero-knowledge proofs of statements in NP. These results do not use any complexity-theoretic assumptions. We can show that they have applications to a variety of models in which oblivious transfer can be done}, www_section = {oblivious circuits}, isbn = {0-89791-264-0}, doi = {10.1145/62212.62215}, url = {http://doi.acm.org/10.1145/62212.62215}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oblivious_transfer.pdf}, }
@article{1993_0, title = {Allocative Efficiency of Markets with Zero-Intelligence Traders: Market as a Partial Substitute for Individual Rationality}, author = {Dhananjay K. Gode and Shyam Sunder}, journal = {Journal of Political Economy}, volume = {101}, year = {1993}, month = feb, pages = {119--137}, abstract = {We report market experiments in which human traders are replaced by "zero-intelligence" programs that submit random bids and offers. Imposing a budget constraint (i.e., not permitting traders to sell below their costs or buy above their values) is sufficient to raise the allocative efficiency of these auctions close to 100 percent. Allocative efficiency of a double auction derives largely from its structure, independent of traders' motivation, intelligence, or learning. Adam Smith's invisible hand may be more powerful than some may have thought; it can generate aggregate rationality not only from individual rationality but also from individual irrationality}, www_section = {allocative efficiency, double auction, market, zero-intelligence trader}, doi = {10.1086/261868}, url = {http://www.jstor.org/stable/2138676}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/JPE\%20\%281993\%29\%20-\%20Gode\%20\%26\%20Sunder\%20-\%20Allocative\%20Efficiency.pdf}, }
@book{1993_1, title = {Elliptic Curve Public Key Cryptosystems}, author = {Alfred J. Menezes}, volume = {234}, year = {1993}, pages = {0--144}, publisher = {Springer}, series = {The Springer International Series in Engineering and Computer Science}, abstract = {Elliptic curves have been intensively studied in algebraic geometry and number theory. In recent years they have been used in devising efficient algorithms for factoring integers and primality proving, and in the construction of public key cryptosystems. Elliptic Curve Public Key Cryptosystems provides an up-to-date and self-contained treatment of elliptic curve-based public key cryptology. Elliptic curve cryptosystems potentially provide equivalent security to the existing public key schemes, but with shorter key lengths. Having short key lengths means smaller bandwidth and memory requirements and can be a crucial factor in some applications, for example the design of smart card systems. The book examines various issues which arise in the secure and efficient implementation of elliptic curve systems. Elliptic Curve Public Key Cryptosystems is a valuable reference resource for researchers in academia, government and industry who are concerned with issues of data security. Because of the comprehensive treatment, the book is also suitable for use as a text for advanced courses on the subject}, www_section = {algebraic geometry, elliptic curve cryptography, number theory, public key cryptosystem}, isbn = {978-0-7923-9368-9}, url = {http://books.google.com/books/about/Elliptic_curve_public_key_cryptosystems.html?id=bIb54ShKS68C}, }
@conference{1997_0,
  title = {Privacy-enhancing Technologies for the Internet},
  author = {Ian Goldberg and David Wagner and Eric Brewer},
  booktitle = {Compcon '97. Proceedings, IEEE},
  organization = {IEEE Computer Society},
  year = {1997},
  month = {February},
  address = {San Jose, CA, United States},
  publisher = {IEEE Computer Society},
  abstract = {The increased use of the Internet for everyday activities is bringing new threats to personal privacy. This paper gives an overview of existing and potential privacy-enhancing technologies for the Internet, as well as motivation and challenges for future work in this field},
  www_section = {Internet, privacy, privacy-enhancing technology},
  isbn = {0818678046},
  url = {http://www.cs.berkeley.edu/~daw/papers/privacy-compcon97-www/privacy-html.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Compcon\%20\%2797\%20-\%20Privacy-enhancing\%20Technologies\%20for\%20the\%20Internet.pdf},
}
@article{1997_1,
  title = {A Reliable Multicast Framework for Light-weight Sessions and Application Level Framing},
  author = {Floyd, Sally and Jacobson, Van and Liu, Ching-Gung and McCanne, Steven and Zhang, Lixia},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {5},
  year = {1997},
  pages = {784--803},
  abstract = {This paper describes SRM (Scalable Reliable Multicast), a reliable multicast framework for light-weight sessions and application level framing. The algorithms of this framework are efficient, robust, and scale well to both very large networks and very large sessions. The SRM framework has been prototyped in wb, a distributed whiteboard application, which has been used on a global scale with sessions ranging from a few to a few hundred participants. The paper describes the principles that have guided the SRM design, including the IP multicast group delivery model, an end-to-end, receiver-based model of reliability, and the application level framing protocol model. As with unicast communications, the performance of a reliable multicast delivery algorithm depends on the underlying topology and operational environment. We investigate that dependence via analysis and simulation, and demonstrate an adaptive algorithm that uses the results of previous loss recovery events to adapt the control parameters used for future loss recovery. With the adaptive algorithm, our reliable multicast delivery algorithm provides good performance over a wide range of underlying topologies},
  www_section = {computer network performance, computer networks, Internetworking},
  issn = {1063-6692},
  doi = {10.1109/90.650139},
  url = {http://dx.doi.org/10.1109/90.650139},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Reliable_MultiCast1997Flyod.pdf},
}
@mastersthesis{1999_0,
  title = {A Distributed Decentralized Information Storage and Retrieval System},
  author = {Ian Clarke},
  school = {University of Edinburgh},
  type = {{PhD} thesis},
  year = {1999},
  abstract = {This report describes an algorithm which if executed by a group of interconnected nodes will provide a robust key-indexed information storage and retrieval system with no element of central control or administration. It allows information to be made available to a large group of people in a similar manner to the "World Wide Web". Improvements over this existing system include:--No central control or administration required--Anonymous information publication and retrieval--Dynamic duplication of popular information--Transfer of information location depending upon demand There is also potential for this system to be used in a modified form as an information publication system within a large organisation which may wish to utilise unused storage space which is distributed across the organisation. The system's reliability is not guaranteed, nor is its efficiency, however the intention is that the efficiency and reliability will be sufficient to make the system useful, and demonstrate that},
  url = {https://bibliography.gnunet.org},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{1999_1,
  title = {Public-key Cryptosystems Based on Composite Degree Residuosity Classes},
  author = {Paillier, Pascal},
  booktitle = {Proceedings of the 17th International Conference on Theory and Application of Cryptographic Techniques},
  organization = {Springer-Verlag},
  year = {1999},
  address = {Berlin, Heidelberg},
  publisher = {Springer-Verlag},
  abstract = {This paper investigates a novel computational problem, namely the Composite Residuosity Class Problem, and its applications to public-key cryptography. We propose a new trapdoor mechanism and derive from this technique three encryption schemes : a trapdoor permutation and two homomorphic probabilistic encryption schemes computationally comparable to RSA. Our cryptosystems, based on usual modular arithmetics, are provably secure under appropriate assumptions in the standard model},
  isbn = {3-540-65889-0},
  url = {http://dl.acm.org/citation.cfm?id=1756123.1756146},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PublicKeyCryptoSystems1999Paillier.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@booklet{200,
  title = {Defending the Sybil Attack in P2P Networks: Taxonomy, Challenges, and a Proposal for Self-Registration},
  author = {Jochen Dinger and Hannes Hartenstein},
  howpublished = {DAS-P2P 2006},
  year = {2006},
  month = {April},
  publisher = {Institut fur Telematik, Universitat Karsruhe (TH), Germany},
  abstract = {The robustness of Peer-to-Peer (P2P) networks, in particular of DHT-based overlay networks, suffers significantly when a Sybil attack is performed. We tackle the issue of Sybil attacks from two sides. First, we clarify, analyze, and classify the P2P identifier assignment process. By clearly separating network participants from network nodes, two challenges of P2P networks under a Sybil attack become obvious: i) stability over time, and ii) identity differentiation. Second, as a starting point for a quantitative analysis of time-stability of P2P networks under Sybil attacks and under some assumptions with respect to identity differentiation, we propose an identity registration procedure called self-registration that makes use of the inherent distribution mechanisms of a P2P network},
  www_section = {attack, P2P, robustness},
  url = {http://dsn.tm.uni-karlsruhe.de/medien/publication-confs/dinger_dasp2p06_sybil.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.8756.pdf},
}
@book{2000,
  title = {Trust-region methods},
  author = {Andrew R. Conn and Nicholas I. M. Gould and Philippe L. Toint},
  organization = {Society for Industrial and Applied Mathematics and Mathematical Programming Society},
  year = {2000},
  address = {Philadelphia, PA},
  publisher = {Society for Industrial and Applied Mathematics and Mathematical Programming Society},
  series = {MPS-SIAM Series on Optimization},
  isbn = {978-0-89871-460-9},
  url = {http://books.google.com/books?hl=es\&lr=\&id=5kNC4fqssYQC\&oi=fnd\&pg=PR15\&dq=trust-region+methods\&ots=j1JMMQ3QJY\&sig=ncLlD3mqZ4KEQ1Z9V2qId4rNffo$\#$v=onepage\&q\&f=false},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@article{2001_0,
  title = {Automated Negotiation: Prospects, Methods and Challenges},
  author = {Nicholas R. Jennings and Peyman Faratin and Alessio R. Lomuscio and Simon Parsons and Carles Sierra and Michael Wooldridge},
  journal = {Group Decision and Negotiation},
  volume = {10},
  year = {2001},
  month = {March},
  pages = {199--215},
  abstract = {This paper is to examine the space of negotiation opportunities for autonomous agents, to identify and evaluate some of the key techniques, and to highlight some of the major challenges for future automated negotiation research. This paper is not meant as a survey of the field of automated negotiation. Rather, the descriptions and assessments of the various approaches are generally undertaken with particular reference to work in which the authors have been involved. However, the specific issues raised should be viewed as being broadly applicable},
  www_section = {automated negotiation, autonomous agent, negotiation},
  doi = {10.1023/A:1008746126376},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Group\%20Decision\%20and\%20Negociation\%20-\%20Automated\%20Negociation.pdf},
}
@article{2001_1,
  title = {{DVD Copy Control Association} vs. {Andrew Bunner}},
  author = {unknown},
  journal = {unknown},
  year = {2001},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{2001_2,
  title = {A Generalisation, a Simplification and Some Applications of Paillier's Probabilistic Public-Key System},
  author = {Damg{\aa}rd, Ivan and Jurik, Mads},
  booktitle = {Proceedings of the 4th International Workshop on Practice and Theory in Public Key Cryptography: Public Key Cryptography},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK, UK},
  publisher = {Springer-Verlag},
  abstract = {We propose a generalisation of Paillier's probabilistic public key system, in which the expansion factor is reduced and which allows to adjust the block length of the scheme even after the public key has been fixed, without loosing the homomorphic property. We show that the generalisation is as secure as Paillier's original system. We construct a threshold variant of the generalised scheme as well as zero-knowledge protocols to show that a given ciphertext encrypts one of a set of given plaintexts, and protocols to verify multiplicative relations on plaintexts. We then show how these building blocks can be used for applying the scheme to efficient electronic voting.This reduces dramatically the work needed to compute the final result of an election, compared to the previously best known schemes.W e show how the basic scheme for a yes/no vote can be easily adapted to casting a vote for up to t out of L candidates. The same basic building blocks can also be adapted to provide receipt-free elections, under appropriate physical assumptions. The scheme for 1 out of L elections can be optimised such that for a certain range of parameter values, a ballot has size only O(log L) bits},
  isbn = {3-540-41658-7},
  url = {http://dl.acm.org/citation.cfm?id=648118.746742},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Generalisation2001Damgard.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{2001_3,
  title = {Investigating the energy consumption of a wireless network interface in an ad hoc networking environment},
  author = {Feeney, Laura Marie and Nilsson, Martin},
  booktitle = {INFOCOM 2001. Twentieth Annual Joint Conference of the IEEE Computer and Communications Societies. Proceedings. IEEE},
  year = {2001},
  month = {April},
  address = {Anchorage, AK, USA},
  abstract = {Energy-aware design and evaluation of network protocols requires knowledge of the energy consumption behavior of actual wireless interfaces. But little practical information is available about the energy consumption behavior of well-known wireless network interfaces and device specifications do not provide information in a form that is helpful to protocol developers. This paper describes a series of experiments which obtained detailed measurements of the energy consumption of an IEEE 802.11 wireless network interface operating in an ad hoc networking environment. The data is presented as a collection of linear equations for calculating the energy consumed in sending, receiving and discarding broadcast and point-to-point data packets of various sizes. Some implications for protocol design and evaluation in ad hoc networks are discussed},
  www_section = {ad-hoc networks, energy consumption, IEEE 802.11},
  isbn = {0-7803-7016-3},
  doi = {10.1109/INFCOM.2001.916651},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=916651},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infocom01investigating.pdf},
}
@incollection{2001_4,
  title = {Multiparty Computation from Threshold Homomorphic Encryption},
  author = {Cramer, Ronald and Damg{\aa}rd, Ivan and Nielsen, Jesper Buus},
  booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT 2001},
  organization = {Springer Berlin Heidelberg},
  volume = {2045},
  year = {2001},
  pages = {280--300},
  editor = {Pfitzmann, Birgit},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {We introduce a new approach to multiparty computation (MPC) basing it on homomorphic threshold crypto-systems. We show that given keys for any sufficiently efficient system of this type,general MPC protocols for n parties can be devised which are secure against an active adversary that corrupts any minority of the parties. The total number of bits broadcast is O(nk|C|),where k is the security parameter and |C| is the size of a (Boolean) circuit computing the function to be securely evaluated. An earlier proposal by Franklin and Haber with the same complexity was only secure for passive adversaries,while all earlier protocols with active security had complexity at least quadratic in n. We give two examples of threshold cryptosystems that can support our construction and lead to the claimed complexities},
  isbn = {978-3-540-42070-5},
  doi = {10.1007/3-540-44987-6_18},
  url = {http://dx.doi.org/10.1007/3-540-44987-6_18},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2001Cramer.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@book{2001_5,
  title = {Peer-To-Peer: Harnessing the Power of Disruptive Technologies -- Chapter 12: Free Haven},
  author = {Roger Dingledine and Michael J. Freedman and David Molnar},
  organization = {O'Reilly Media},
  year = {2001},
  editor = {Oram, Andy},
  publisher = {O'Reilly Media},
  abstract = {Description of the problems that arise when one tries to combine anonymity and accountability. Note that the Free Haven design described here charges for storing data in the network (downloads are free), whereas in GNUnet adding data is free and only the downloads are considered as utilization},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@book{2001_6,
  title = {The Theory of Incentives: The Principal-Agent Model},
  author = {Jean-Jacques Laffont and David Martimort},
  organization = {Princeton University Press},
  year = {2001},
  address = {Princeton, New Jersey, USA},
  pages = {0--360},
  publisher = {Princeton University Press},
  abstract = {Economics has much to do with incentives--not least, incentives to work hard, to produce quality products, to study, to invest, and to save. Although Adam Smith amply confirmed this more than two hundred years ago in his analysis of sharecropping contracts, only in recent decades has a theory begun to emerge to place the topic at the heart of economic thinking. In this book, Jean-Jacques Laffont and David Martimort present the most thorough yet accessible introduction to incentives theory to date. Central to this theory is a simple question as pivotal to modern-day management as it is to economics research: What makes people act in a particular way in an economic or business situation? In seeking an answer, the authors provide the methodological tools to design institutions that can ensure good incentives for economic agents. This book focuses on the principal-agent model, the "simple" situation where a principal, or company, delegates a task to a single agent through a contract--the essence of management and contract theory. How does the owner or manager of a firm align the objectives of its various members to maximize profits? Following a brief historical overview showing how the problem of incentives has come to the fore in the past two centuries, the authors devote the bulk of their work to exploring principal-agent models and various extensions thereof in light of three types of information problems: adverse selection, moral hazard, and non-verifiability. 
Offering an unprecedented look at a subject vital to industrial organization, labor economics, and behavioral economics, this book is set to become the definitive resource for students, researchers, and others who might find themselves pondering what contracts, and the incentives they embody, are really all about},
  www_section = {economics, principal-agent model},
  isbn = {9780691091846},
  url = {http://press.princeton.edu/chapters/i7311.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Laffont\%20\%26\%20Martimort\%20-\%20The\%20Theory\%20of\%20Incentives.pdf},
}
@conference{2002_0,
  title = {Cooperative Backup System},
  author = {Sameh Elnikety and Mark Lillibridge and Mike Burrows and Willy Zwaenepoel},
  booktitle = {Proceedings of the USENIX Conference on File and Storage Technologies},
  year = {2002},
  abstract = {This paper presents the design of a novel backup system built on top of a peer-to-peer architecture with minimal supporting infrastructure. The system can be deployed for both large-scale and small-scale peer-to-peer overlay networks. It allows computers connected to the Internet to back up their data cooperatively. Each computer has a set of partner computers and stores its backup data distributively among those partners. In return, such a way as to achieve both fault-tolerance and high reliability. This form of cooperation poses several interesting technical challenges because these computers have independent failure modes, do not trust each other, and are subject to third party attacks},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/elnikety.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@article{2002_1,
  title = {Finite-length analysis of low-density parity-check codes on the binary erasure channel},
  author = {Changyan Di and David Proietti and I. Emre Telatar and Thomas J. Richardson and R{\"u}diger L. Urbanke},
  journal = {IEEE Transactions on Information Theory},
  year = {2002},
  month = {January},
  abstract = {In this paper, we are concerned with the finite-length analysis of low-density parity-check (LDPC) codes when used over the binary erasure channel (BEC). The main result is an expression for the exact average bit and block erasure probability for a given regular ensemble of LDPC codes when decoded iteratively. We also give expressions for upper bounds on the average bit and block erasure probability for regular LDPC ensembles and the standard random ensemble under maximum-likelihood (ML) decoding. Finally, we present what we consider to be the most important open problems in this area},
  www_section = {BEC, coding theory, low-density parity-check, maximum-likelihood},
  doi = {10.1109/TIT.2002.1003839},
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1003839},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Finite-length\%20analysis\%20of\%20low-density\%20parity-check\%20codes\%20on.pdf},
}
@techreport{2002_2_GNet,
  title = {The GNet Whitepaper},
  author = {Krista Bennett and Tiberius Stef and Christian Grothoff and Tzvetan Horozov and Ioana Patrascu},
  institution = {Purdue University},
  year = {2002},
  month = {June},
  type = {Technical report},
  abstract = {This paper describes GNet, a reliable anonymous distributed backup system with reasonable defenses against malicious hosts and low overhead in traffic and CPU time. The system design is described and compared to other publicly used services with similar goals. Additionally, the implementation and the protocols of GNet are presented},
  www_section = {anonymity, economics, encoding, GNUnet, obsolete database},
  keywords = {anonymity, economics, encoding, GNUnet, obsolete database},
  www_tags = {selected},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main.pdf},
}
@incollection{2002_3,
  title = {Mnemosyne: Peer-to-Peer Steganographic Storage},
  author = {Hand, Steven and Roscoe, Timothy},
  booktitle = {Peer-to-Peer Systems},
  organization = {Springer Berlin Heidelberg},
  volume = {2429},
  year = {2002},
  pages = {130--140},
  editor = {Druschel, Peter and Kaashoek, Frans and Rowstron, Antony},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  isbn = {978-3-540-44179-3},
  doi = {10.1007/3-540-45748-8_13},
  url = {http://dx.doi.org/10.1007/3-540-45748-8_13},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/107.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{2003_0,
  title = {Bootstrapping a Distributed Computational Economy with Peer-to-Peer Bartering},
  author = {Chun, Brent and Yun Fu and Vahdat, Amin},
  booktitle = {Proceedings of the 1st Workshop on Economics of Peer-to-Peer Systems},
  year = {2003},
  month = {June},
  address = {Berkeley, CA, USA},
  www_section = {bartering, distributed computational economies, peer-to-peer bartering, resource discovery, resource exchange, resource peering},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Workshop\%20on\%20Economics\%20of\%20P2P\%20Systems\%2703\%20-\%20Chun\%2C\%20Fu\%20\%26\%20Vahdat.pdf},
}
@conference{2003_1,
  title = {The effect of rumor spreading in reputation systems for mobile ad-hoc networks},
  author = {Sonja Buchegger and Jean-Yves Le Boudec},
  booktitle = {Proceedings of WiOpt {\textquoteleft}03: Modeling and Optimization in Mobile, Ad Hoc and Wireless Networks},
  year = {2003},
  month = {March},
  address = {Sophia-Antipolis, France},
  abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and forwarding. For individual nodes there are however several advantages resulting from noncooperation, the most obvious being power saving. Nodes that act selfishly or even maliciously pose a threat to availability in mobile adhoc networks. Several approaches have been proposed to detect noncooperative nodes. In this paper, we investigate the e$\#$ect of using rumors with respect to the detection time of misbehaved nodes as well as the robustness of the reputation system against wrong accusations. We propose a Bayesian approach for reputation representation, updates, and view integration. We also present a mechanism to detect and exclude potential lies. The simulation results indicate that by using this Bayesian approach, the reputation system is robust against slander while still benefitting from the speed-up in detection time provided by the use of rumors},
  www_section = {mobile Ad-hoc networks, reputation, reputation system, rumor},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WiOpt\%2703\%20-\%20Buchegger\%20\%26\%20Le\%20Boudec\%20-\%20Reputation\%20Systems.pdf},
}
@article{2003_10,
  title = {On the Topology of Overlay-Networks},
  author = {Thomas Fuhrmann},
  journal = {unknown},
  year = {2003},
  abstract = {Random-graph models are about to become an important tool in the study of wireless ad-hoc and sensor-networks, peer-to-peer networks, and, generally, overlay-networks. Such models provide a theoretical basis to assess the capabilities of certain networks, and guide the design of new protocols. Especially the recently proposed models for so-called small-world networks receive much attention from the networking community. This paper proposes the use of two more mathematical concepts for the analysis of network topologies, dimension and curvature. These concepts can intuitively be applied to, e.g., sensor-networks. But they can also be sensibly defined for certain other random-graph models. The latter is non-trivial since such models may describe purely virtual networks that do not inherit properties from an underlying physical world. Analysis of a random-graph model for Gnutella-like overlay-networks yields strong indications that such networks might be characterized as a sphere with fractal dimension},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03topology.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@article{2003_2,
  title = {The evolution of altruistic punishment},
  author = {Robert Boyd and Herbert Gintis and Samuel Bowles and Peter J. Richerson},
  journal = {Proceedings of the National Academy of Sciences of the USA},
  volume = {100},
  year = {2003},
  month = {March},
  pages = {3531--3535},
  abstract = {Both laboratory and field data suggest that people punish noncooperators even in one-shot interactions. Although such {\textquotedblleft}altruistic punishment{\textquotedblright} may explain the high levels of cooperation in human societies, it creates an evolutionary puzzle: existing models suggest that altruistic cooperation among nonrelatives is evolutionarily stable only in small groups. Thus, applying such models to the evolution of altruistic punishment leads to the prediction that people will not incur costs to punish others to provide benefits to large groups of nonrelatives. However, here we show that an important asymmetry between altruistic cooperation and altruistic punishment allows altruistic punishment to evolve in populations engaged in one-time, anonymous interactions. This process allows both altruistic punishment and altruistic cooperation to be maintained even when groups are large and other parameter values approximate conditions that characterize cultural evolution in the small-scale societies in which humans lived for most of our prehistory},
  www_section = {altruistic cooperation, altruistic punishment, cooperation, human society, nonrelatives},
  doi = {10.1073/pnas.0630443100},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20\%282003\%29\%20-\%20The\%20evolution\%20of\%20altruistic\%20punishment.pdf},
}
@article{2003_3,
  title = {Extremum Feedback with Partial Knowledge},
  author = {Thomas Fuhrmann and J{\"o}rg Widmer},
  journal = {unknown},
  volume = {2816/2003},
  year = {2003},
  abstract = {A scalable feedback mechanism to solicit feedback from a potentially very large group of networked nodes is an important building block for many network protocols. Multicast transport protocols use it for negative acknowledgements and for delay and packet loss determination. Grid computing and peer-to-peer applications can use similar approaches to find nodes that are, at a given moment in time, best suited to serve a request. In sensor networks, such mechanisms allow to report extreme values in a resource efficient way. In this paper we analyze several extensions to the exponential feedback algorithm [5,6] that provide an optimal way to collect extreme values from a potentially very large group of networked nodes. In contrast to prior work, we focus on how knowledge about the value distribution in the group can be used to optimize the feedback process. We describe the trade-offs that have to be decided upon when using these extensions and provide additional insight into their performance by means of simulation. Furthermore, we briefly illustrate how sample applications can benefit from the proposed mechanisms},
  isbn = {978-3-540-20051-2},
  issn = {0302-9743},
  doi = {10.1007/b13249},
  url = {http://www.springerlink.com/content/bvelyaew4ukl4aau/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03feedback.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{2003_4,
  title = {A game theoretic framework for incentives in P2P systems},
  author = {Chiranjeeb Buragohain and Divyakant Agrawal and Subhash Suri},
  booktitle = {Proceedings of the 3rd International Conference on Peer-to-Peer Computing},
  organization = {IEEE Computer Society},
  year = {2003},
  month = {September},
  address = {Link{\"o}ping, Sweden},
  pages = {48--56},
  publisher = {IEEE Computer Society},
  abstract = {Peer-to-peer (P2P) networks are self-organizing, distributed systems, with no centralized authority or infrastructure. Because of the voluntary participation, the availability of resources in a P2P system can be highly variable and unpredictable. We use ideas from game theory to study the interaction of strategic and rational peers, and propose a differential service-based incentive scheme to improve the system's performance},
  www_section = {network, P2P, peer-to-peer networking, system performance},
  isbn = {0-7695-2023-5},
  doi = {10.1109/PTP.2003.1231503},
  url = {http://dx.doi.org/10.1109/PTP.2003.1231503},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Buragohain\%2C\%20Agrawal\%20\%26\%20Suri\%20-\%20Incentives\%20in\%20P2P\%20systems.pdf},
}
@conference{2003_5,
  title = {Incentives build robustness in BitTorrent},
  author = {Bram Cohen},
  booktitle = {NetEcon'03--Proceedings of the Workshop on Economics of Peer-to-Peer Systems},
  year = {2003},
  month = {June},
  address = {Berkeley, CA, USA},
  abstract = {The BitTorrent file distribution system uses tit-for-tat as a method to seeking pareto efficiency. It achieves a higher level of robustness and resource utilization than any currently known cooperative technique. We explain what BitTorrent does, and how economic methods are used to achieve that goal},
  www_section = {BitTorrent, resource utilization, robustness},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2703\%20-\%20Cohen\%20-\%20Incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
}
@conference{2003_6,
  title = {KARMA: a Secure Economic Framework for P2P Resource Sharing},
  author = {Vivek Vishnumurthy and Sangeeth Chandrakumar and Emin G{\"u}n Sirer},
  booktitle = {P2PECON'03. Proceedings of the 1st Workshop on Economics of Peer-to-Peer Systems},
  year = {2003},
  month = {June},
  address = {Berkeley, CA, USA},
  abstract = {Peer-to-peer systems are typically designed around the assumption that all peers will willingly contribute resources to a global pool. They thus suffer from freeloaders,that is, participants who consume many more resources than they contribute. In this paper, we propose a general economic framework for avoiding freeloaders in peer-to-peer systems. Our system works by keeping track of the resource consumption and resource contributionof each participant. The overall standing of each},
  www_section = {economic framework, freeloader, karma, p2p resource sharing},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20KARMA.pdf},
}
@incollection{2003_7,
  title = {Koorde: A Simple degree-optimal distributed hash table},
  author = {M. Frans Kaashoek and David Karger},
  booktitle = {Peer-to-Peer Systems II},
  organization = {Springer},
  volume = {2735/2003},
  year = {2003},
  address = {Berlin / Heidelberg},
  pages = {98--107},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {Koorde is a new distributed hash table (DHT) based on Chord and the de Bruijn graphs. While inheriting the simplicity of Chord, Koorde meets various lower bounds, such as O(log n) hops per lookup request with only 2 neighbors per node (where n is the number of nodes in the DHT), and O(log n/log log n) hops per lookup request with O(log n) neighbors per node},
  www_section = {de Bruijn graph, distributed hash table, Koorde},
  doi = {10.1007/b11823},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koorde.pdf},
}
@conference{2003_8,
  title = {Revealing Information While Preserving Privacy},
  author = {Dinur, Irit and Nissim, Kobbi},
  booktitle = {Proceedings of the Twenty-second ACM SIGMOD-SIGACT-SIGART Symposium on Principles of Database Systems},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  publisher = {ACM},
  abstract = {We examine the tradeoff between privacy and usability of statistical databases. We model a statistical database by an n-bit string d1 ,.., dn , with a query being a subset $q \subseteq [n]$ to be answered by summation of values which belong to q. Our main result is a polynomial reconstruction algorithm of data from noisy (perturbed) subset sums. Applying this reconstruction algorithm to statistical databases we show that in order to achieve privacy one has to add perturbation of magnitude $\Omega(\sqrt{n})$. That is, smaller perturbation always results in a strong violation of privacy. We show that this result is tight by exemplifying access algorithms for statistical databases that preserve privacy while adding perturbation of magnitude $O(\sqrt{n})$. For time-T bounded adversaries we demonstrate a privacy-preserving access algorithm whose perturbation magnitude is $\approx \sqrt{T}$},
  www_section = {data reconstruction, integrity and security, subset-sums with noise},
  isbn = {1-58113-670-6},
  doi = {10.1145/773153.773173},
  url = {http://doi.acm.org/10.1145/773153.773173},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RevelaingInformation2003Dinur.pdf},
}
@inproceedings{2003_9,
  title = {Scalable Application-level Anycast for Highly Dynamic Groups},
  author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron},
  booktitle = {NGC'03 Networked Group Communication, Fifth International COST264 Workshop},
  organization = {Springer},
  volume = {2816},
  year = {2003},
  month = sep,
  address = {Munich, Germany},
  pages = {47--57},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science, 2003},
  abstract = {We present an application-level implementation of anycast for highly dynamic groups. The implementation can handle group sizes varying from one to the whole Internet, and membership maintenance is efficient enough to allow members to join for the purpose of receiving a single message. Key to this efficiency is the use of a proximity-aware peer-to-peer overlay network for decentralized, lightweight group maintenance; nodes join the overlay once and can join and leave many groups many times to amortize the cost of maintaining the overlay. An anycast implementation with these properties provides a key building block for distributed applications. In particular, it enables management and location of dynamic resources in large scale peer-to-peer systems. We present several resource management applications that are enabled by our implementation},
  www_section = {anycast, application-level, highly dynamic groups, peer-to-peer networking},
  doi = {10.1007/978-3-540-39405-1_5},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NGC\%2703\%20-\%20Scalable\%20Application-level\%20Anycast\%20.pdf},
}
@inproceedings{2004.Pang.imc.dns,
  title = {Availability, Usage, and Deployment Characteristics of the Domain Name System},
  author = {Jeffrey Pang and James Hendricks and Aditya Akella and Bruce Maggs and Roberto De Prisco and Seshan, Srinivasan},
  booktitle = {IMC'04--Proceedings of the 4th ACM SIGCOMM Conference on Internet Measurement},
  organization = {ACM},
  year = {2004},
  month = oct,
  address = {Taormina, Sicily, Italy},
  publisher = {ACM},
  abstract = {The Domain Name System (DNS) is a critical part of the Internet's infrastructure, and is one of the few examples of a robust, highly-scalable, and operational distributed system. Although a few studies have been devoted to characterizing its properties, such as its workload and the stability of the top-level servers, many key components of DNS have not yet been examined. Based on large-scale measurements taken from servers in a large content distribution network, we present a detailed study of key characteristics of the DNS infrastructure, such as load distribution, availability, and deployment patterns of DNS servers. Our analysis includes both local DNS servers and servers in the authoritative hierarchy. We find that (1) the vast majority of users use a small fraction of deployed name servers, (2) the availability of most name servers is high, and (3) there exists a larger degree of diversity in local DNS server deployment and usage than for authoritative servers. Furthermore, we use our DNS measurements to draw conclusions about federated infrastructures in general. 
We evaluate and discuss the impact of federated deployment models on future systems, such as Distributed Hash Tables},
  www_section = {availability, DNS, federated},
  isbn = {1-58113-821-0},
  doi = {10.1145/1028788.1028790},
  url = {http://doi.acm.org/10.1145/1028788.1028790},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2704\%20-\%20Availability\%2C\%20Usage\%2C\%20and\%20Deployment\%20Characteristics\%20of\%20the\%20DNS.pdf},
}
@booklet{2004_0,
  title = {Apres-a system for anonymous presence},
  author = {Laurie, Ben},
  year = {2004},
  abstract = {If Alice wants to know when Bob is online, and they don't want anyone else to know their interest in each other, what do they do? Once they know they are both online, they would like to be able to exchange messages, send files, make phone calls to each other, and so forth, all without anyone except them knowing they are doing this. Apres is a system that attempts to make this possible},
  www_section = {anonymous presence, presence},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/apres.pdf},
}
@article{2004_1,
  title = {A construction of locality-aware overlay network: mOverlay and its performance},
  author = {Xin Yan Zhang and Qian Zhang and Zhang, Zhensheng and Gang Song and Wenwu Zhu},
  journal = {IEEE Journal on Selected Areas in Communications},
  volume = {22},
  year = {2004},
  month = jan,
  pages = {18--28},
  abstract = {There are many research interests in peer-to-peer (P2P) overlay architectures. Most widely used unstructured P2P networks rely on central directory servers or massive message flooding, clearly not scalable. Structured overlay networks based on distributed hash tables (DHT) are expected to eliminate flooding and central servers, but can require many long-haul message deliveries. An important aspect of constructing an efficient overlay network is how to exploit network locality in the underlying network. We propose a novel mechanism, mOverlay, for constructing an overlay network that takes account of the locality of network hosts. The constructed overlay network can significantly decrease the communication cost between end hosts by ensuring that a message reaches its destination with small overhead and very efficient forwarding. To construct the locality-aware overlay network, dynamic landmark technology is introduced. We present an effective locating algorithm for a new host joining the overlay network. We then present a theoretical analysis and simulation results to evaluate the network performance. Our analysis shows that the overhead of our locating algorithm is O(logN), where N is the number of overlay network hosts. Our simulation results show that the average distance between a pair of hosts in the constructed overlay network is only about 11\% of the one in a traditional, randomly connected overlay network. Network design guidelines are also provided. 
Many large-scale network applications, such as media streaming, application-level multicasting, and media distribution, can leverage mOverlay to enhance their performance},
  www_section = {distributed hash table, flooding attacks, overlay networks, P2P},
  url = {http://kmweb.twbbs.org/drupal/node/13},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/6-914.ppt},
}
@article{2004_10,
  title = {Personalized Web search for improving retrieval effectiveness},
  author = {Fang Liu and Yu, C. and Weiyi Meng},
  journal = {Knowledge and Data Engineering, IEEE Transactions on},
  volume = {16},
  year = {2004},
  month = jan,
  pages = {28--40},
  abstract = {Current Web search engines are built to serve all users, independent of the special needs of any individual user. Personalization of Web search is to carry out retrieval for each user incorporating his/her interests. We propose a novel technique to learn user profiles from users' search histories. The user profiles are then used to improve retrieval effectiveness in Web search. A user profile and a general profile are learned from the user's search history and a category hierarchy, respectively. These two profiles are combined to map a user query into a set of categories which represent the user's search intention and serve as a context to disambiguate the words in the user's query. Web search is conducted based on both the user query and the set of categories. Several profile learning and category mapping algorithms and a fusion algorithm are provided and evaluated. Experimental results indicate that our technique to personalize Web search is both effective and efficient},
  www_section = {BANDWIDTH, category hierarchy, category mapping algorithms, Displays, fusion algorithm, History, human factors, information filtering, information retrieval, libraries, personalized Web search, profile learning, retrieval effectiveness, search engines, search intention, special needs, user interfaces, user profiles, user search histories, Web search, Web search engines},
  issn = {1041-4347},
  doi = {10.1109/TKDE.2004.1264820},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedWebSearch2004Liu.pdf},
}
@booklet{2004_11,
  title = {POSIX--Portable Operating System Interface},
  author = {{The Open Group} and {IEEE}},
  howpublished = {The Open Group Technical Standard Base Specifications, Issue 6},
  number = {IEEE Std 1003.n},
  year = {2004},
  www_section = {API, asynchronous, built-in utility, CPU, file access control mechanism, input/output (I/O), job control, network, portable operating system interface (POSIX), shell, stream, synchronous},
  url = {http://pubs.opengroup.org/onlinepubs/009695399/},
}
@inproceedings{2004_12,
  title = {A Probabilistic Approach to Predict Peers' Performance in P2P Networks},
  author = {Zoran Despotovic and Karl Aberer},
  booktitle = {CIA 2004. Cooperative Information Agents VIII, 8th International Workshop},
  organization = {Springer},
  volume = {3191},
  year = {2004},
  month = sep,
  address = {Erfurt, Germany},
  pages = {62--76},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {The problem of encouraging trustworthy behavior in P2P online communities by managing peers' reputations has drawn a lot of attention recently. However, most of the proposed solutions exhibit the following two problems: huge implementation overhead and unclear trust related model semantics. In this paper we show that a simple probabilistic technique, maximum likelihood estimation namely, can reduce these two problems substantially when employed as the feedback aggregation strategy. Thus, no complex exploration of the feedback is necessary. Instead, simple, intuitive and efficient probabilistic estimation methods suffice},
  www_section = {p2p network, peer performance},
  doi = {10.1007/978-3-540-30104-2_6},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CIA\%2704\%20-\%20Despotovic\%20\%26\%20Aberer\%20-\%20Peers\%27\%20performance\%20in\%20P2P\%20networks.pdf},
}
@article{2004_13,
  title = {Scalable byzantine agreement},
  author = {Lewis, Scott and Saia, Jared},
  journal = {unknown},
  year = {2004},
  abstract = {This paper gives a scalable protocol for solving the Byzantine agreement problem. The protocol is scalable in the sense that for Byzantine agreement over n processors, each processor sends and receives only O(log n) messages in expectation. To the best of our knowledge this is the first result for the Byzantine agreement problem where each processor sends and receives o(n) messages. The protocol uses randomness and is correct with high probability. 1 It can tolerate any fraction of faulty processors which is strictly less than 1/6. Our result partially answers the following question posed by Kenneth Birman: {\textquotedblleft}How scalable are the traditional solutions to problems such as Consensus or Byzantine Agreement?{\textquotedblright} [5]},
  www_section = {byzantine agreement},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sba.pdf},
}
@phdthesis{2004_14,
  title = {Signaling and Networking in Unstructured Peer-to-Peer Networks},
  author = {R{\"u}diger Schollmeier},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  year = {2004},
  month = sep,
  address = {Munich, Germany},
  pages = {0--177},
  type = {Dissertation},
  abstract = {This work deals with the efficiency of Peer-to-Peer (P2P) networks, which are distributed and self-organizing overlay networks. We contribute to their understanding and design by using new measurement techniques, simulations and analytical methods. In this context we first present measurement methods and results of P2P networks concerning traffic and topology characteristics as well as concerning user behavior. Based on these results we develop stochastic models to describe the user behavior, the traffic and the topology of P2P networks analytically. Using the results of our measurements and analytical investigations, we develop new P2P architectures to improve the efficiency of P2P networks concerning their topology and their signaling traffic. Finally we verify our results for the new architectures by measurements as well as computer-based simulations on different levels of detail},
  www_section = {application model, communication network, compression, content availability, cross layer communication, generating functions, overlay networks, random graph theory, self-organization, signaling traffic, simulation, topology measurement, traffic measurement, user model},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Schollmeier\%20-\%20Signaling\%20and\%20networking\%20in\%20unstructured\%20p2p\%20networks.pdf},
}
@phdthesis{2004_2,
  title = {The Decentralised Coordination of Self-Adaptive Components for Autonomic Distributed Systems},
  author = {Jim Dowling},
  school = {University of Dublin},
  type = {Doctor of Philosophy},
  year = {2004},
  month = oct,
  address = {Dublin, Ireland},
  pages = {0--214},
  www_section = {autonomic distributed system, descentralised coordination},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20Autonomic\%20distributed\%20systems.pdf},
}
@inproceedings{2004_3,
  title = {Designing Incentive mechanisms for peer-to-peer systems},
  author = {John Chuang},
  booktitle = {GECON 2004. 1st IEEE International Workshop on Grid Economics and Business Models},
  organization = {IEEE Computer Society},
  year = {2004},
  month = apr,
  address = {Seoul, South Korea},
  pages = {67--81},
  publisher = {IEEE Computer Society},
  abstract = {From file-sharing to mobile ad-hoc networks, community networking to application layer overlays, the peer-to-peer networking paradigm promises to revolutionize the way we design, build and use the communications network of tomorrow, transform the structure of the communications industry, and challenge our understanding of markets and democracies in a digital age. The fundamental premise of peer-to-peer systems is that individual peers voluntarily contribute resources to the system. We discuss some of the research opportunities and challenges in the design of incentive mechanisms for P2P systems},
  www_section = {incentives, P2P, peer-to-peer networking},
  isbn = {0-7803-8525-X},
  doi = {10.1109/GECON.2004.1317584},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GECON\%2704\%20-\%20Designing\%20incentive\%20mechanisms\%20for\%20p2p\%20systems.pdf},
}
@book{2004_4,
  title = {Efficient Private Matching and Set Intersection},
  author = {Freedman, Michael J. and Nissim, Kobbi and Pinkas, Benny},
  booktitle = {Advances in Cryptology--EUROCRYPT 2004},
  organization = {Springer Berlin Heidelberg},
  volume = {3027},
  year = {2004},
  pages = {1--19},
  editor = {Cachin, Christian and Camenisch, Jan L.},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {We consider the problem of computing the intersection of private datasets of two parties, where the datasets contain lists of elements taken from a large domain. This problem has many applications for online collaboration. We present protocols, based on the use of homomorphic encryption and balanced hashing, for both semi-honest and malicious environments. For lists of length k, we obtain O(k) communication overhead and O(k ln ln k) computation. The protocol for the semi-honest environment is secure in the standard model, while the protocol for the malicious environment is secure in the random oracle model. We also consider the problem of approximating the size of the intersection, show a linear lower-bound for the communication overhead of solving this problem, and provide a suitable secure protocol. Lastly, we investigate other variants of the matching problem, including extending the protocol to the multi-party setting as well as considering the problem of approximate matching},
  isbn = {978-3-540-21935-4},
  doi = {10.1007/978-3-540-24676-3_1},
  url = {http://dx.doi.org/10.1007/978-3-540-24676-3_1},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EffecitvePrivateMatching2004Freedman.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@article{2004_5,
  title = {Enhancing Web privacy and anonymity in the digital era},
  author = {Stefanos Gritzalis},
  journal = {Information Management \& Computer Security},
  volume = {12},
  year = {2004},
  month = jan,
  pages = {255--287},
  type = {survey},
  abstract = {This paper presents a state-of-the-art review of the Web privacy and anonymity enhancing security mechanisms, tools, applications and services, with respect to their architecture, operational principles and vulnerabilities. Furthermore, to facilitate a detailed comparative analysis, the appropriate parameters have been selected and grouped in classes of comparison criteria, in the form of an integrated comparison framework. The main concern during the design of this framework was to cover the confronted security threats, applied technological issues and users' demands satisfaction. GNUnet's Anonymity Protocol (GAP), Freedom, Hordes, Crowds, Onion Routing, Platform for Privacy Preferences (P3P), TRUSTe, Lucent Personalized Web Assistant (LPWA), and Anonymizer have been reviewed and compared. The comparative review has clearly highlighted that the pros and cons of each system do not coincide, mainly due to the fact that each one exhibits different design goals and thus adopts dissimilar techniques for protecting privacy and anonymity},
  www_section = {anonymity, GNUnet, onion routing},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p255.pdf},
}
@book{2004_6,
  title = {Group Spreading: A Protocol for Provably Secure Distributed Name Service},
  author = {Awerbuch, Baruch and Scheideler, Christian},
  booktitle = {Automata, Languages and Programming},
  organization = {Springer Berlin Heidelberg},
  volume = {3142},
  year = {2004},
  pages = {183--195},
  editor = {D{\'\i}az, Josep and Karhum{\"a}ki, Juhani and Lepist{\"o}, Arto and Sannella, Donald},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  isbn = {978-3-540-22849-3},
  doi = {10.1007/978-3-540-27836-8_18},
  url = {http://dx.doi.org/10.1007/978-3-540-27836-8_18},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p_icalp04_0.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@book{2004_7,
  title = {An Introduction to Auction Theory},
  author = {Flavio M. Menezes and Paulo K. Monteiro},
  organization = {Oxford University Press},
  year = {2004},
  edition = {First},
  pages = {0--199},
  publisher = {Oxford University Press},
  abstract = {This book presents an in-depth discussion of the auction theory. It introduces the concept of Bayesian Nash equilibrium and the idea of studying auctions as games. Private, common, and affiliated values models and multi-object auction models are described. A general version of the Revenue Equivalence Theorem is derived and the optimal auction is characterized to relate the field of mechanism design to auction theory},
  www_section = {affiliated values model, auction theory, Bayesian Nash equilibrium, common values model, multiple objects, private values model, Revenue Equivalence Theorem},
  isbn = {9780199275984},
  doi = {10.1093/019927598X.001.0001},
  url = {http://www.oxfordscholarship.com/view/10.1093/019927598X.001.0001/acprof-9780199275984},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Menezes\%20\%26\%20Monteiro\%20-\%20An\%20Introduction\%20to\%20Auction\%20Theory.pdf},
}
@conference{2004_8,
  title = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks},
  author = {unknown},
  booktitle = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks},
  year = {2004},
  editor = {Hasan S{\"o}zer and Metin Kekkalmaz and Ibrahim K{\"o}rpeoglu},
  abstract = {File sharing in wireless ad-hoc networks in a peer to peer manner imposes many challenges that make conventional peer-to-peer systems operating on wire-line networks inapplicable for this case. Information and workload distribution as well as routing are major problems for members of a wireless ad-hoc network, which are only aware of their neighborhood. In this paper we propose a system that solves peer-to-peer filesharing problem for wireless ad-hoc networks. Our system works according to peer-to-peer principles, without requiring a central server, and distributes information regarding the location of shared files among members of the network. By means of a {\textquotedblleft}hashline{\textquotedblright} and forming a tree-structure based on the topology of the network, the system is able to answer location queries, and also discover and maintain routing information that is used to transfer files from a source-peer to another peer},
  www_section = {ad-hoc networks, file systems, P2P},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.124.9928.pdf},
}
@article{2004_9,
  title = {Peer-to-Peer Networking \& -Computing},
  author = {Ralf Steinmetz and Klaus Wehrle},
  journal = {Informatik Spektrum},
  volume = {27},
  year = {2004},
  month = feb,
  pages = {51--54},
  abstract = {Unter dem Begriff Peer-to-Peer etabliert sich ein h{\"o}chst interessantes Paradigma f{\"u}r die Kommunikation im Internet. Obwohl urspr{\"u}nglich nur f{\"u}r die sehr pragmatischen und rechtlich umstrittenen Dateitauschb{\"o}rsen entworfen, k{\"o}nnen die Peerto-Peer-Mechanismen zur verteilten Nutzung unterschiedlichster Betriebsmittel genutzt werden und neue M{\"o}glichkeiten f{\"u}r Internetbasierte Anwendungen er{\"o}ffnen},
  www_section = {computing, networking, peer-to-peer networking},
  doi = {10.1007/s00287-003-0362-9},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Informatik\%20Spektrum\%20-\%20Peer-to-peer\%20networking\%20\%26\%20-computing.pdf},
}
@inproceedings{2005_0,
  title = {Chainsaw: Eliminating Trees from Overlay Multicast},
  author = {Vinay Pai and Kapil Kumar and Karthik Tamilmani and Vinay Sambamurthy and Alexander E. Mohr},
  booktitle = {4th International Workshop},
  organization = {Springer Berlin / Heidelberg},
  volume = {3640},
  year = {2005},
  month = nov,
  address = {Ithaca, NY, USA},
  pages = {127--140},
  editor = {Miguel Castro and Robbert Van Renesse},
  publisher = {Springer Berlin / Heidelberg},
  series = {Lecture Notes in Computer Science (Peer-to-peer Systems IV)},
  abstract = {In this paper, we present Chainsaw, a p2p overlay multicast system that completely eliminates trees. Peers are notified of new packets by their neighbors and must explicitly request a packet from a neighbor in order to receive it. This way, duplicate data can be eliminated and a peer can ensure it receives all packets. We show with simulations that Chainsaw has a short startup time, good resilience to catastrophic failure and essentially no packet loss. We support this argument with real-world experiments on Planetlab and compare Chainsaw to Bullet and Splitstream using MACEDON},
  www_section = {chainsaw, p2p overlay multicast system, packet loss, trees},
  isbn = {978-3-540-29068-1},
  issn = {1611-3349 (Online)},
  doi = {10.1007/11558989},
  url = {http://www.springerlink.com/content/l13550223q12l65v/about/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chainsaw.pdf},
}
@article{2005_1,
  title = {Cooperation among strangers with limited information about reputation},
  author = {Gary E. Bolton and Elena Katok and Axel Ockenfels},
  journal = {Journal of Public Economics},
  volume = {89},
  year = {2005},
  month = aug,
  pages = {1457--1468},
  abstract = {The amount of institutional intervention necessary to secure efficiency-enhancing cooperation in markets and organizations, in circumstances where interactions take place among essentially strangers, depends critically on the amount of information informal reputation mechanisms need transmit. Models based on subgame perfection find that the information necessary to support cooperation is recursive in nature and thus information generating and processing requirements are quite demanding. Models that do not rely on subgame perfection, on the other hand, suggest that the information demands may be quite modest. The experiment we present indicates that even without any reputation information there is a non-negligible amount of cooperation that is, however, quite sensitive to the cooperation costs. For high costs, providing information about a partner's immediate past action increases cooperation. Recursive information about the partners' previous partners' reputation further promotes cooperation, regardless of the cooperation costs},
  www_section = {cooperation, experimental economics, reputation},
  doi = {10.1016/j.jpubeco.2004.03.008},
  url = {http://dx.doi.org/10.1016/j.jpubeco.2004.03.008},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Public\%20Economics\%20-\%20Bolton\%2C\%20Katok\%20\%26\%20Ockenfels.pdf},
}
@article{2005_10,
  title = {Privacy Practices of Internet Users: Self-reports Versus Observed Behavior},
  author = {Jensen, Carlos and Potts, Colin and Jensen, Christian},
  journal = {International Journal of Human-Computer Studies},
  volume = {63},
  year = {2005},
  pages = {203--227},
  abstract = {Several recent surveys conclude that people are concerned about privacy and consider it to be an important factor in their online decision making. This paper reports on a study in which (1) user concerns were analysed more deeply and (2) what users said was contrasted with what they did in an experimental e-commerce scenario. Eleven independent variables were shown to affect the online behavior of at least some groups of users. Most significant were trust marks present on web pages and the existence of a privacy policy, though users seldom consulted the policy when one existed. We also find that many users have inaccurate perceptions of their own knowledge about privacy technology and vulnerabilities, and that important user groups, like those similar to the Westin "privacy fundamentalists", do not appear to form a cohesive group for privacy-related decision making. In this study we adopt an experimental economic research paradigm, a method for examining user behavior which challenges the current emphasis on survey data. We discuss these issues and the implications of our results on user interpretation of trust marks and interaction design. 
Although broad policy implications are beyond the scope of this paper, we conclude by questioning the application of the ethical/legal doctrine of informed consent to online transactions in the light of the evidence that users frequently do not consult privacy policies},
  www_section = {decision-making, design, e-commerce, economic models, policy, privacy, survey},
  issn = {1071-5819},
  doi = {10.1016/j.ijhcs.2005.04.019},
  url = {http://dx.doi.org/10.1016/j.ijhcs.2005.04.019},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPractices2005Jensen.pdf},
}
@book{2005_11,
  title = {Privacy-Preserving Set Operations},
  author = {Kissner, Lea and Song, Dawn},
  booktitle = {Advances in Cryptology -- CRYPTO 2005},
  organization = {Springer Berlin Heidelberg},
  volume = {3621},
  year = {2005},
  pages = {241--257},
  editor = {Shoup, Victor},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {In many important applications, a collection of mutually distrustful parties must perform private computation over multisets. Each party's input to the function is his private input multiset. In order to protect these private sets, the players perform privacy-preserving computation; that is, no party learns more information about other parties' private input sets than what can be deduced from the result. In this paper, we propose efficient techniques for privacy-preserving operations on multisets. By building a framework of multiset operations, employing the mathematical properties of polynomials, we design efficient, secure, and composable methods to enable privacy-preserving computation of the union, intersection, and element reduction operations. We apply these techniques to a wide range of practical problems, achieving more efficient results than those of previous work},
  isbn = {978-3-540-28114-6},
  doi = {10.1007/11535218_15},
  url = {http://dx.doi.org/10.1007/11535218_15},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreservingSetOperations2005Kissner.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@book{2005_12,
  title = {On Private Scalar Product Computation for Privacy-Preserving Data Mining},
  author = {Goethals, Bart and Laur, Sven and Lipmaa, Helger and Mielik{\"a}inen, Taneli},
  booktitle = {Information Security and Cryptology -- ICISC 2004},
  organization = {Springer Berlin Heidelberg},
  volume = {3506},
  year = {2005},
  pages = {104--120},
  editor = {Park, Choon-sik and Chee, Seongtaek},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {In mining and integrating data from multiple sources, there are many privacy and security issues. In several different contexts, the security of the full privacy-preserving data mining protocol depends on the security of the underlying private scalar product protocol. We show that two of the private scalar product protocols, one of which was proposed in a leading data mining conference, are insecure. We then describe a provably private scalar product protocol that is based on homomorphic encryption and improve its efficiency so that it can also be used on massive datasets},
  www_section = {Privacy-preserving data mining, private scalar product protocol, vertically partitioned frequent pattern mining},
  isbn = {978-3-540-26226-8},
  doi = {10.1007/11496618_9},
  url = {http://dx.doi.org/10.1007/11496618_9},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateScalarProduct2004Goethals.pdf},
}
@techreport{2005_13,
  title = {A Quick Introduction to Bloom Filters},
  author = {Christian Grothoff},
  institution = {The GNUnet Project},
  year = {2005},
  month = aug,
  www_section = {Bloom filter, GNUnet},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bloomfilter.pdf},
}
@inproceedings{2005_2,
  title = {Correctness of a gossip based membership protocol},
  author = {Andre Allavena and Alan Demers and John E. Hopcroft},
  booktitle = {PODC'05},
  organization = {ACM},
  year = {2005},
  publisher = {ACM},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossip-podc05.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@book{2005_3,
  title = {Distributed Hash Tables},
  author = {Klaus Wehrle and G{\"o}tz, Stefan and Rieche, Simon},
  booktitle = {Peer-to-Peer Systems and Applications},
  organization = {Springer},
  volume = {3485},
  year = {2005},
  chapter = {7},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {In the last few years, an increasing number of massively distributed systems with millions of participants has emerged within very short time frames. Applications, such as instant messaging, file-sharing, and content distribution have attracted countless numbers of users. For example, Skype gained more than 2.5 millions of users within twelve months, and more than 50\% of Internet traffic is originated by BitTorrent. These very large and still rapidly growing systems attest to a new era for the design and deployment of distributed systems. In particular, they reflect what the major challenges are today for designing and implementing distributed systems: scalability, flexibility, and instant deployment},
  www_section = {distributed hash table},
  doi = {10.1007/11530657_7},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LNCS\%20-\%20Distributed\%20Hash\%20Tables.pdf},
}
@inproceedings{2005_4,
  title = {An empirical study of free-riding behavior in the maze p2p file-sharing system},
  author = {Yang, Mao and Zhang, Zheng and Li, Xiaoming and Dai, Yafei},
  booktitle = {Proceedings of the 4th international conference on Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2005},
  address = {Berlin, Heidelberg},
  publisher = {Springer-Verlag},
  www_section = {free-riding, incentives, Sybil attack},
  isbn = {3-540-29068-0, 978-3-540-29068-1},
  doi = {10.1007/11558989_17},
  url = {http://dx.doi.org/10.1007/11558989_17},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/maze_freeride.pdf},
}
@conference{2005_5, title = {Exchange-based incentive mechanisms for peer-to-peer file sharing}, author = {Kostas G. Anagnostakis and Michael B. Greenwald}, booktitle = {Proceedings of International Conference on Distributed Computing Systems 2004}, organization = {IEEE Computer Society}, year = {2005}, month = {March}, address = {Tokyo, Japan}, pages = {524--533}, publisher = {IEEE Computer Society}, abstract = {Performance of peer-to-peer resource sharing networks depends upon the level of cooperation of the participants. To date, cash-based systems have seemed too complex, while lighter-weight credit mechanisms have not provided strong incentives for cooperation. We propose exchange-based mechanisms that provide incentives for cooperation in peer-to-peer file sharing networks. Peers give higher service priority to requests from peers that can provide a simultaneous and symmetric service in return. We generalize this approach to n-way exchanges among rings of peers and present a search algorithm for locating such rings. We have used simulation to analyze the effect of exchanges on performance. Our results show that exchange-based mechanisms can provide strong incentives for sharing, offering significant improvements in service times for sharing users compared to free-riders, without the problems and complexity of cash- or credit-based systems}, www_section = {exchange-based mechanism, peer-to-peer networking, sharing}, isbn = {0-7695-2086-3}, doi = {10.1109/ICDCS.2004.1281619}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2704.pdf}, }
@conference{2005_6, title = {Fuzzy Identity-Based Encryption}, author = {Sahai, Amit and Waters, Brent}, booktitle = {EUROCRYPT'05 Workshop on the Theory and Application of Cryptographic Techniques}, organization = {Springer}, volume = {3494}, year = {2005}, month = {May}, address = {Aarhus, Denmark}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, %%%%% ERROR: Non-ASCII characters: ''ωωωω'' abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as set of descriptive attributes. A Fuzzy IBE scheme allows for a private key for an identity, ω, to decrypt a ciphertext encrypted with an identity, ω , if and only if the identities ω and ω are close to each other as measured by the {\textquotedblleft}set overlap{\textquotedblright} distance metric. A Fuzzy IBE scheme can be applied to enable encryption using biometric inputs as identities; the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for the use of biometric identities, which inherently will have some noise each time they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of application that we term {\textquotedblleft}attribute-based encryption{\textquotedblright}. In this paper we present two constructions of Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based Encryption of a message under several attributes that compose a (fuzzy) identity. Our IBE schemes are both error-tolerant and secure against collusion attacks. Additionally, our basic construction does not use random oracles. We prove the security of our schemes under the Selective-ID security model}, www_section = {Fuzzy IBE, IBE}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2705\%20-\%20Fuzzy\%20Identity-Based\%20Encryption.pdf}, }
@conference{2005_8, title = {OpenDHT: a public DHT service and its uses}, author = {unknown}, booktitle = {Proceedings of the 2005 conference on Applications, technologies, architectures, and protocols for computer communications}, organization = {ACM}, year = {2005}, address = {New York, NY, USA}, pages = {73--84}, publisher = {ACM}, series = {SIGCOMM '05}, www_section = {distributed hash table, openDHT, peer-to-peer, resource allocation}, isbn = {1-59593-009-4}, doi = {10.1145/1080091.1080102}, url = {http://doi.acm.org/10.1145/1080091.1080102}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/openDHT.pdf}, }
@article{2005_9, title = {P2P Contracts: a Framework for Resource and Service Exchange}, author = {Dipak Ghosal and Benjamin K. Poon and Keith Kong}, journal = {FGCS. Future Generations Computer Systems}, volume = {21}, year = {2005}, month = {March}, pages = {333--347}, abstract = {A crucial aspect of Peer-to-Peer (P2P) systems is that of providing incentives for users to contribute their resources to the system. Without such incentives, empirical data show that a majority of the participants act asfree riders. As a result, a substantial amount of resource goes untapped, and, frequently, P2P systems devolve into client-server systems with attendant issues of performance under high load. We propose to address the free rider problem by introducing the notion of a P2P contract. In it, peers are made aware of the benefits they receive from the system as a function of their contributions. In this paper, we first describe a utility-based framework to determine the components of the contract and formulate the associated resource allocation problem. We consider the resource allocation problem for a flash crowd scenario and show how the contract mechanism implemented using a centralized server can be used to quickly create pseudoservers that can serve out the requests. We then study a decentralized implementation of the P2P contract scheme in which each node implements the contract based on local demand. We show that in such a system, other than contributing storage and bandwidth to serve out requests, it is also important that peer nodes function as application-level routers to connect pools of available pseudoservers. 
We study the performance of the distributed implementation with respect to the various parameters including the terms of the contract and the triggers to create pseudoservers and routers}, www_section = {contracts, framework, P2P, peer-to-peer networking, resource exchange, service exchange}, issn = {0167-739X}, doi = {10.1016/j.future.2004.04.013}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FGCS\%20-\%20P2P\%20Contracts\%3A\%20a\%20Framework\%20for\%20Resource\%20and\%20Service\%20Exchange.pdf}, }
@mastersthesis{2006_0, title = {Access Control in Peer-to-Peer Storage Systems}, author = {Erol Ko{\c c}}, school = {Eidgen{\"o}ssische Technische Hochschule Z{\"u}rich (ETH)}, volume = {Communication Systems}, year = {2006}, month = {October}, address = {Zurich, Switzerland}, pages = {0--159}, type = {Master's Thesis}, www_section = {access control, peer-to-peer storage system}, url = {http://webcache.googleusercontent.com/u/ethweb?oe=utf8\&GO.x=0\&GO.y=0\&hl=es\&q=cache:7sJLnyzj1TcJ:http://www.zisc.ethz.ch/events/ISC20067Slides/MA_Report_Erol_Koc.pdf+Erol+Ko\%C3\%A7\&ct=clnk}, }
@conference{2006_1, title = {Combating Hidden Action in Unstructured Peer-to-Peer Systems}, author = {Qi Zhao and Jianzhong Zhang and Jingdong Xu}, booktitle = {ChinaCom '06. First International Conference on Communications and Networking in China}, organization = {IEEE Computer Society}, year = {2006}, month = {October}, address = {Beijing, China}, pages = {1--5}, publisher = {IEEE Computer Society}, abstract = {In unstructured peer-to-peer systems, cooperation by the intermediate peers are essential for the success of queries. However, intermediate peers may choose to forward packets at a low priority or not forward the packets at all, which is referred as peers' hidden action. Hidden action may lead to significant decrement of search efficiency. In contrast to building a global system with reputations or economics, we proposed MSSF, an improved search method, to help queries route around the peers with hidden action. MSSF does not need to check other peers' behavior. It automatically adapts to change query routes according to the previous query results. Simulation results show that MSSF is more robust than Gnutella flooding when peers with hidden action increase}, www_section = {cooperation, hidden action, unstructured peer-to-peer system}, isbn = {1-4244-0463-0}, doi = {http://dx.doi.org/10.1109/CHINACOM.2006.344762}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChinaCom\%2706\%20-\%20Combating\%20hidden\%20action\%20in\%20unstructured\%20p2p\%20systems.pdf}, }
@book{2006_10, title = {DNS-Based Service Discovery in Ad Hoc Networks: Evaluation and Improvements}, author = {Celeste Campo and Carlos Garc{\'\i}a-Rubio}, organization = {Springer Berlin / Heidelberg}, volume = {Volume 4217/2006}, year = {2006}, publisher = {Springer Berlin / Heidelberg}, abstract = {In wireless networks, devices must be able to dynamically discover and share services in the environment. The problem of service discovery has attracted great research interest in the last years, particularly for ad hoc networks. Recently, the IETF has proposed the use of the DNS protocol for service discovery. For ad hoc networks, the IETF works in two proposals of distributed DNS, Multicast DNS and LLMNR, that can both be used for service discovery. In this paper we describe and compare through simulation the performance of service discovery based in these two proposals of distributed DNS. We also propose four simple improvements that reduce the traffic generated, and so the power consumption, especially of the most limited, battery powered, devices. We present simulation results that show the impact of our improvements in a typical scenario}, www_section = {ad-hoc networks, DNS}, issn = {978-3-540-45174-7}, doi = {10.1007/11872153}, url = {http://www.springerlink.com/content/m8322m1006416270/}, }
@conference{2006_11, title = {Improving traffic locality in BitTorrent via biased neighbor selection}, author = {Ruchir Bindal and Pei Cao and William Chan and Jan Medved and George Suwala and Tony Bates and Amy Zhang}, booktitle = {Proceedings of the 26th IEEE International Conference on Distributed Computing Systems}, organization = {IEEE Computer Society}, year = {2006}, month = {January}, address = {Lisboa, Portugal}, pages = {0--66}, publisher = {IEEE Computer Society}, abstract = {Peer-to-peer (P2P) applications such as BitTorrent ignore traffic costs at ISPs and generate a large amount of cross-ISP traffic. As a result, ISPs often throttle BitTorrent traffic to control the cost. In this paper, we examine a new approach to enhance BitTorrent traffic locality, biased neighbor selection, in which a peer chooses the majority, but not all, of its neighbors from peers within the same ISP. Using simulations, we show that biased neighbor selection maintains the nearly optimal performance of Bit- Torrent in a variety of environments, and fundamentally reduces the cross-ISP traffic by eliminating the traffic's linear growth with the number of peers. Key to its performance is the rarest first piece replication algorithm used by Bit- Torrent clients. Compared with existing locality-enhancing approaches such as bandwidth limiting, gateway peers, and caching, biased neighbor selection requires no dedicated servers and scales to a large number of BitTorrent networks}, www_section = {BitTorrent, neighbor selection, peer-to-peer networking, performance, traffic locality}, isbn = {0-7695-2540-7}, issn = {1063-6927}, doi = {10.1109/ICDCS.2006.48}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2706\%20-\%20Improving\%20traffic\%20locality\%20in\%20BitTorrent.pdf}, }
@book{2006_12, title = {Less Hashing, Same Performance: Building a Better Bloom Filter}, author = {Kirsch, Adam and Mitzenmacher, Michael}, booktitle = {Algorithms -- ESA 2006}, organization = {Springer Berlin Heidelberg}, volume = {4168}, year = {2006}, pages = {456--467}, editor = {Azar, Yossi and Erlebach, Thomas}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {A standard technique from the hashing literature is to use two hash functions h1(x) and h2(x) to simulate additional hash functions of the form gi (x) = h1(x) + ih2(x). We demonstrate that this technique can be usefully applied to Bloom filters and related data structures. Specifically, only two hash functions are necessary to effectively implement a Bloom filter without any loss in the asymptotic false positive probability. This leads to less computation and potentially less need for randomness in practice}, isbn = {978-3-540-38875-3}, doi = {10.1007/11841036_42}, url = {http://dx.doi.org/10.1007/11841036_42}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LessHashing2006Kirsch.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{2006_13, title = {Our Data, Ourselves: Privacy via Distributed Noise Generation}, author = {Dwork, Cynthia and Kenthapadi, Krishnaram and McSherry, Frank and Mironov, Ilya and Naor, Moni}, booktitle = {Proceedings of the 24th Annual International Conference on The Theory and Applications of Cryptographic Techniques}, organization = {Springer-Verlag}, year = {2006}, address = {Berlin, Heidelberg}, publisher = {Springer-Verlag}, abstract = {In this work we provide efficient distributed protocols for generating shares of random noise, secure against malicious participants. The purpose of the noise generation is to create a distributed implementation of the privacy-preserving statistical databases described in recent papers [14, 4, 13]. In these databases, privacy is obtained by perturbing the true answer to a database query by the addition of a small amount of Gaussian or exponentially distributed random noise. The computational power of even a simple form of these databases, when the query is just of the form sum over all rows 'i' in the database of a function f applied to the data in row i, has been demonstrated in [4]. A distributed implementation eliminates the need for a trusted database administrator. The results for noise generation are of independent interest. The generation of Gaussian noise introduces a technique for distributing shares of many unbiased coins with fewer executions of verifiable secret sharing than would be needed using previous approaches (reduced by a factor of n). 
The generation of exponentially distributed noise uses two shallow circuits: one for generating many arbitrarily but identically biased coins at an amortized cost of two unbiased random bits apiece, independent of the bias, and the other to combine bits of appropriate biases to obtain an exponential distribution}, isbn = {3-540-34546-9, 978-3-540-34546-6}, doi = {10.1007/11761679_29}, url = {http://dx.doi.org/10.1007/11761679_29}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OurData2006Dwork.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{2006_14, title = {Peer to peer size estimation in large and dynamic networks: A comparative study}, author = {Erwan Le Merrer and Anne-Marie Kermarrec and Massouli{\'e}, Laurent}, booktitle = {HPDC'06--15th IEEE International Symposium on High Performance Distributed Computing}, organization = {IEEE Computer Society}, year = {2006}, month = {June}, address = {Paris, France}, publisher = {IEEE Computer Society}, abstract = {As the size of distributed systems keeps growing, the peer to peer communication paradigm has been identified as the key to scalability. Peer to peer overlay networks are characterized by their self-organizing capabilities, resilience to failure and fully decentralized control. In a peer to peer overlay, no entity has a global knowledge of the system. As much as this property is essential to ensure the scalability, monitoring the system under such circumstances is a complex task. Yet, estimating the size of the system is core functionality for many distributed applications to parameter setting or monitoring purposes. In this paper, we propose a comparative study between three algorithms that estimate in a fully decentralized way the size of a peer to peer overlay. Candidate approaches are generally applicable irrespective of the underlying structure of the peer to peer overlay. The paper reports the head to head comparison of estimation system size algorithms. The simulations have been conducted using the same simulation framework and inputs and highlight the differences in cost and accuracy of the estimation between the algorithms both in static and dynamic settings}, www_section = {comparison, counting, network size estimation, peer to peer}, isbn = {1-4244-0307-3}, doi = {http://dx.doi.org/10.1109/HPDC.2006.1652131}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPDC\%2706\%20-\%20Peer\%20to\%20peer\%20size\%20estimation\%20in\%20large\%20and\%20dynamic\%20networks.pdf}, }
@conference{2006_15, title = {Privacy Preserving Nearest Neighbor Search}, author = {Shaneck, M. and Yongdae Kim and Kumar, V.}, booktitle = {Data Mining Workshops, 2006. ICDM Workshops 2006. Sixth IEEE International Conference on}, year = {2006}, month = {December}, abstract = {Data mining is frequently obstructed by privacy concerns. In many cases data is distributed, and bringing the data together in one place for analysis is not possible due to privacy laws (e.g. HIPAA) or policies. Privacy preserving data mining techniques have been developed to address this issue by providing mechanisms to mine the data while giving certain privacy guarantees. In this work we address the issue of privacy preserving nearest neighbor search, which forms the kernel of many data mining applications. To this end, we present a novel algorithm based on secure multiparty computation primitives to compute the nearest neighbors of records in horizontally distributed data. We show how this algorithm can be used in three important data mining algorithms, namely LOF outlier detection, SNN clustering, and kNN classification}, www_section = {Clustering algorithms, Computer science, Conferences, cryptography, Data mining, data privacy, distributed computing, Kernel, kNN classification, LOF outlier detection, Medical diagnostic imaging, multiparty computation primitives, nearest neighbor search, Nearest neighbor searches, pattern clustering, privacy preservation, SNN clustering}, doi = {10.1109/ICDMW.2006.133}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2006Shaneck.pdf}, }
@article{2006_16, title = {Reactive Clustering in MANETs}, author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart}, journal = {International Journal of Pervasive Computing and Communications}, volume = {2}, year = {2006}, pages = {81--90}, publisher = {unknown}, abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been proposed in the literature. With only one exception so far (1), all these protocols are proactive, thus wasting bandwidth when their function is not currently needed. To reduce the signalling traffic load, reactive clustering may be employed.We have developed a clustering protocol named {\textquotedblleft}On-Demand Group Mobility-Based Clustering{\textquotedblright} (ODGMBC) (2), (3) which is reactive. Its goal is to build clusters as a basis for address autoconfiguration and hierarchical routing. In contrast to the protocol described in ref. (1), the design process especially addresses the notions of group mobility and of multi-hop clusters in a MANET. As a result, ODGMBC maps varying physical node groups onto logical clusters. In this paper, ODGMBC is described. It was implemented for the ad hoc network simulator GloMoSim (4) and evaluated using several performance indicators. Simulation results are promising and show that ODGMBC leads to stable clusters. This stability is advantageous for autoconfiguration and routing mechansims to be employed in conjunction with the clustering algorithm}, www_section = {mobile Ad-hoc networks, multi-hop networks}, doi = {10.1108/17427370780000143}, url = {http://www.emeraldinsight.com/journals.htm?articleid=1615724\&show=pdf}, }
@incollection{2006_17, title = {Reputation Mechanisms}, author = {Chrysanthos Dellarocas}, booktitle = {Handbook on Information Systems and Economics}, organization = {Elsevier}, year = {2006}, pages = {629--660}, publisher = {Elsevier}, www_section = {online marketplace, reputation mechanism}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dellarocas\%20-\%20Reputation\%20Mechanisms.pdf}, }
@conference{2006_18, title = {Scalable Routing in Sensor Actuator Networks with Churn}, author = {unknown}, booktitle = {Sensor and Ad Hoc Communications and Networks, 2006. SECON '06. 2006 3rd Annual IEEE Communications Society on}, year = {2006}, month = {September}, abstract = {Routing in wireless networks is inherently difficult since their network topologies are typically unstructured and unstable. Therefore, many routing protocols for ad-hoc networks and sensor networks revert to flooding to acquire routes to previously unknown destinations. However, such an approach does not scale to large networks, especially when nodes need to communicate with many different destinations. This paper advocates a novel approach, the scalable source routing (SSR) protocol. It combines overlay-like routing in a virtual network structure with source routing in the physical network structure. As a consequence, SSR can efficiently provide the routing semantics of a structured routing overlay, making it an efficient basis for the scalable implementation of fully decentralized applications. In T. Fuhrmann (2005) it has been demonstrated that SSR can almost entirely avoid flooding, thus leading to a both memory and message efficient routing mechanism for large unstructured networks. This paper extends SSR to unstable networks, i. e. networks with churn where nodes frequently join and leave, the latter potentially ungracefully}, www_section = {ad-hoc networks, scalable source routing}, isbn = {1-4244-0626-9}, doi = {10.1109/SAHCN.2006.288406}, url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4068086\%2F4068087\%2F04068105.pdf\%3Farnumber\%3D4068105\&authDecision=-203}, }
@book{2006_2, title = {Combinatorial Auctions}, author = {Peter Cramton and Yoav Shoham and Richard Steinberg}, organization = {MIT Press}, year = {2006}, address = {Cambridge, MA}, pages = {0--649}, publisher = {MIT Press}, abstract = {The study of combinatorial auctions -- auctions in which bidders can bid on combinations of items or "packages" -- draws on the disciplines of economics, operations research, and computer science. This landmark collection integrates these three perspectives, offering a state-of-the art survey of developments in combinatorial auction theory and practice by leaders in the field.Combinatorial auctions (CAs), by allowing bidders to express their preferences more fully, can lead to improved economic efficiency and greater auction revenues. However, challenges arise in both design and implementation. Combinatorial Auctions addresses each of these challenges. After describing and analyzing various CA mechanisms, the book addresses bidding languages and questions of efficiency. Possible strategies for solving the computationally intractable problem of how to compute the objective-maximizing allocation (known as the winner determination problem) are considered, as are questions of how to test alternative algorithms. The book discusses five important applications of CAs: spectrum auctions, airport takeoff and landing slots, procurement of freight transportation services, the London bus routes market, and industrial procurement. This unique collection makes recent work in CAs available to a broad audience of researchers and practitioners. The integration of work from the three disciplines underlying CAs, using a common language throughout, serves to advance the field in theory and practice}, www_section = {combinatorial auctions, winner determination problem}, isbn = {0262033429}, issn = {978-0262033428}, url = {http://works.bepress.com/cramton/35}, }
@article{2006_20, title = {A survey on networking games in telecommunications}, author = {Eitan Altman and Thomas Boulogne and Rachid El-Azouzi and Tania Jim{\'e}nez and Laura Wynter}, journal = {Computers \& Operations Research}, volume = {33}, year = {2006}, month = {February}, pages = {286--311}, publisher = {Elsevier}, abstract = {In this survey, we summarize different modeling and solution concepts of networking games, as well as a number of different applications in telecommunications that make use of or can make use of networking games. We identify some of the mathematical challenges and methodologies that are involved in these problems. We include here work that has relevance to networking games in telecommunications from other areas, in particular from transportation planning}, www_section = {communication network, game theory}, doi = {10.1016/j.cor.2004.06.005}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/netgames.pdf}, }
@book{2006_21, title = {Unconditionally Secure Constant-Rounds Multi-party Computation for Equality, Comparison, Bits and Exponentiation}, author = {Damg{\'a}rd, Ivan and Fitzi, Matthias and Kiltz, Eike and Nielsen, JesperBuus and Toft, Tomas}, booktitle = {Theory of Cryptography}, organization = {Springer Berlin Heidelberg}, volume = {3876}, year = {2006}, pages = {285--304}, editor = {Halevi, Shai and Rabin, Tal}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, %%%%% ERROR: Non-ASCII characters: ''⌈⌉'' abstract = {We show that if a set of players hold shares of a value a {\epsilon} Fp for some prime p (where the set of shares is written [a] p ), it is possible to compute, in constant rounds and with unconditional security, sharings of the bits of a, i.e., compute sharings [a0] p , ..., [al- 1] p such that l = ⌈ log2 p ⌉, a0,...,al--1 {\epsilon} {0,1} and a = summation of ai * 2^i where 0 <= i <= l- 1. Our protocol is secure against active adversaries and works for any linear secret sharing scheme with a multiplication protocol. The complexity of our protocol is O(llogl) invocations of the multiplication protocol for the underlying secret sharing scheme, carried out in O(1) rounds. This result immediately implies solutions to other long-standing open problems such as constant-rounds and unconditionally secure protocols for deciding whether a shared number is zero, comparing shared numbers, raising a shared number to a shared exponent and reducing a shared number modulo a shared modulus}, isbn = {978-3-540-32731-8}, doi = {10.1007/11681878_15}, url = {http://dx.doi.org/10.1007/11681878_15}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecure2006Damgard.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@book{2006_3, title = {Combining Virtual and Physical Structures for Self-organized Routing}, author = {Thomas Fuhrmann}, booktitle = {Self-Organizing Systems}, volume = {Volume 4124/2006}, year = {2006}, publisher = {unknown}, series = {Lecture Notes in Computer Science}, abstract = {Our recently proposed scalable source routing (SSR) protocol combines source routing in the physical network with Chord-like routing in the virtual ring that is formed by the address space. Thereby, SSR provides self-organized routing in large unstructured networks of resource-limited devices. Its ability to quickly adapt to changes in the network topology makes it suitable not only for sensor-actuator networks but also for mobile ad-hoc networks. Moreover, SSR directly provides the key-based routing semantics, thereby making it an efficient basis for the scalable implementation of self-organizing, fully decentralized applications. In this paper we review SSR's self-organizing features and demonstrate how the combination of virtual and physical structures leads to emergence of stability and efficiency. In particular, we focus on SSR's resistance against node churn. Following the principle of combining virtual and physical structures, we propose an extension that stabilizes SSR in face of heavy node churn. Simulations demonstrate the effectiveness of this extension}, www_section = {Chord, scalable source routing, self-organization}, issn = {978-3-540-37658-3}, doi = {10.1007/11822035}, url = {http://www.springerlink.com/content/4540535t4v2g2548/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Combining\%20Virtual\%20and\%20Physical\%20Structures\%20for\%20Self-organized\%20Routing_0.pdf}, }
@article{2006_4, title = {Communication Networks On the fundamental communication abstraction supplied by P2P overlay networks}, author = {Cramer, Curt and Thomas Fuhrmann}, journal = {unknown}, year = {2006}, abstract = {The disruptive advent of peer-to-peer (P2P) file sharing in 2000 attracted significant interest. P2P networks have matured from their initial form, unstructured overlays, to structured overlays like distributed hash tables (DHTs), which are considered state-of-the-art. There are huge efforts to improve their performance. Various P2P applications like distributed storage and application-layer multicast were proposed. However, little effort was spent to understand the communication abstraction P2P overlays supply. Only when it is understood, the reach of P2P ideas will significantly broaden. Furthermore, this clarification reveals novel approaches and highlights future directions. In this paper, we reconsider well-known P2P overlays, linking them to insights from distributed systems research. We conclude that the main communication abstraction is that of a virtual address space or application-specific naming. On this basis, P2P systems build a functional layer implementing, for example lookup, indirection and distributed processing. Our insights led us to identify interesting and unexplored points in the design space}, www_section = {distributed hash table, P2P}, url = {http://www3.interscience.wiley.com/journal/109858517/abstract}, }
@article{2006_5, title = {Complementary currency innovations: Self-guarantee in peer-to-peer currencies}, author = {Mitra Ardron and Bernard Lietaer}, journal = {International Journal of Community Currency Research}, volume = {10}, year = {2006}, month = {January}, pages = {1--7}, abstract = {The WAT system, as used in Japan, allows for businesses to issue their own tickets (IOU's) which can circulate as a complementary currency within a community. This paper proposes a variation on that model, where the issuer of a ticket can offer a guarantee, in the form of some goods or services. The difference in value, along with a reasonable acceptance that the issuer is capable of delivering the service or goods, allows for a higher degree of confidence in the ticket, and therefore a greater liquidity}, www_section = {guarantee, peer-to-peer currencies}, issn = {1325-9547}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IJCCR\%20vol\%2010\%20\%282006\%29\%201\%20Ardron\%20and\%20Lietaer.pdf}, }
@conference{2006_6, title = {Curve25519: new Diffie-Hellman speed records}, author = {Daniel J. Bernstein}, booktitle = {PKC}, year = {2006}, month = feb, www_section = {Curve25519, ECC, ECDH, GNUnet}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/curve25519-20060209.pdf}, }
@book{2006_7, title = {Designing Economics Mechanisms}, author = {Leonid Hurwicz and Stanley Reiter}, organization = {Cambridge University Press}, year = {2006}, address = {Cambridge, U.K}, publisher = {Cambridge University Press}, abstract = {A mechanism is a mathematical structure that models institutions through which economic activity is guided and coordinated. There are many such institutions; markets are the most familiar ones. Lawmakers, administrators and officers of private companies create institutions in order to achieve desired goals. They seek to do so in ways that economize on the resources needed to operate the institutions, and that provide incentives that induce the required behaviors. This book presents systematic procedures for designing mechanisms that achieve specified performance, and economize on the resources required to operate the mechanism. The systematic design procedures are algorithms for designing informationally efficient mechanisms. Most of the book deals with these procedures of design. When there are finitely many environments to be dealt with, and there is a Nash-implementing mechanism, our algorithms can be used to make that mechanism into an informationally efficient one. Informationally efficient dominant strategy implementation is also studied. Leonid Hurwicz is the Nobel Prize Winner 2007 for The Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel, along with colleagues Eric Maskin and Roger Myerson, for his work on the effectiveness of markets}, www_section = {algorithms, Complexity, Computational Geometry, Computer Algebra, Economics: general interest}, isbn = {9780521836418}, doi = {http://dx.doi.org/10.1017/CBO9780511754258}, }
@incollection{2006_8,
  title       = {Differential Privacy},
  author      = {Dwork, Cynthia},
  booktitle   = {Automata, Languages and Programming},
  publisher   = {Springer Berlin Heidelberg},
  series      = {Lecture Notes in Computer Science},
  volume      = {4052},
  year        = {2006},
  pages       = {1--12},
  editor      = {Bugliesi, Michele and Preneel, Bart and Sassone, Vladimiro and Wegener, Ingo},
  abstract    = {In 1977 Dalenius articulated a desideratum for statistical databases: nothing about an individual should be learnable from the database that cannot be learned without access to the database. We give a general impossibility result showing that a formalization of Dalenius' goal along the lines of semantic security cannot be achieved. Contrary to intuition, a variant of the result threatens the privacy even of someone not in the database. This state of affairs suggests a new measure, differential privacy, which, intuitively, captures the increased risk to one's privacy incurred by participating in a database.The techniques developed in a sequence of papers [8, 13, 3], culminating in those described in [12], can achieve any desired level of privacy under this measure. In many cases, extremely accurate information about the database can be provided while simultaneously ensuring very high levels of privacy},
  isbn        = {978-3-540-35907-4},
  doi         = {10.1007/11787006_1},
  url         = {http://dx.doi.org/10.1007/11787006_1},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2006Dwork_0.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@phdthesis{2006_9,
  title       = {Distributed k-ary System: Algorithms for Distributed Hash Tables},
  author      = {Ghodsi, Ali},
  school      = {KTH/Royal Institute of Technology},
  year        = {2006},
  month       = dec,
  address     = {Stockholm},
  pages       = {0--209},
  abstract    = {This dissertation presents algorithms for data structures called distributed hash tables (DHT) or structured overlay networks, which are used to build scalable self-managing distributed systems. The provided algorithms guarantee lookup consistency in the presence of dynamism: they guarantee consistent lookup results in the presence of nodes joining and leaving. Similarly, the algorithms guarantee that routing never fails while nodes join and leave. Previous algorithms for lookup consistency either suffer from starvation, do not work in the presence of failures, or lack proof of correctness. Several group communication algorithms for structured overlay networks are presented. We provide an overlay broadcast algorithm, which unlike previous algorithms avoids redundant messages, reaching all nodes in O(log n) time, while using O(n) messages, where n is the number of nodes in the system. The broadcast algorithm is used to build overlay multicast. We introduce bulk operation, which enables a node to efficiently make multiple lookups or send a message to all nodes in a specified set of identifiers. The algorithm ensures that all specified nodes are reached in O(log n) time, sending maximum O(log n) messages per node, regardless of the input size of the bulk operation. Moreover, the algorithm avoids sending redundant messages. Previous approaches required multiple lookups, which consume more messages and can render the initiator a bottleneck. Our algorithms are used in DHT-based storage systems, where nodes can do thousands of lookups to fetch large files. We use the bulk operation algorithm to construct a pseudo-reliable broadcast algorithm. Bulk operations can also be used to implement efficient range queries. Finally, we describe a novel way to place replicas in a DHT, called symmetric replication, that enables parallel recursive lookups. Parallel lookups are known to reduce latencies. However, costly iterative lookups have previously been used to do parallel lookups. Moreover, joins or leaves only require exchanging O(1) messages, while other schemes require at least log(f) messages for a replication degree of f. The algorithms have been implemented in a middleware called the Distributed k-ary System (DKS), which is briefly described},
  www_section = {distributed hash table, distributed k-ary system, DKS},
  url         = {http://eprints.sics.se/516/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ghodsi\%20-\%20Distributed\%20k-ary\%20System.pdf},
}
@techreport{2007_0,
  title       = {Analyzing Peer Behavior in {KAD}},
  author      = {Steiner, Moritz and En-Najjary, Taoufik and Biersack, E. W.},
  institution = {Institut Eurecom},
  number      = {RR-07-205},
  year        = {2007},
  month       = oct,
  address     = {Sophia Antipolis},
  type        = {Tech report},
  abstract    = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey2000, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling KAD continuously for about six months and obtained information about geographical distribution of peers, session times, peer availability, and peer lifetime. We also evaluated to what extent information about past peer uptime can be used to predict the remaining uptime of the peer. Peers are identified by the so called KAD ID, which was up to now assumed to remain the same across sessions. However, we observed that this is not the case: There is a large number of peers, in particular in China, that change their KAD ID, sometimes as frequently as after each session. This change of KAD IDs makes it difficult to characterize end-user availability or membership turnover. By tracking end-users with static IP addresses, we could measure the rate of change of KAD ID per end-user},
  www_section = {distributed hash table, KAD, peer behavior},
  url         = {http://www.eurecom.fr/~btroup/kadtraces/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Analyzing\%20peer\%20behavior\%20in\%20KAD.pdf},
}
@book{2007_1,
  title       = {{B.A.T.M.A.N} Status Report},
  author      = {Neumann, Axel and Aichele, Corinna Elektra and Lindner, Marek},
  year        = {2007},
  publisher   = {unknown},
  abstract    = {This report documents the current status of the development and implementation of the B.A.T.M.A.N (better approach to mobile ad-hoc networking) routing protocol. B.A.T.M.A.N uses a simple and robust algorithm for establishing multi-hop routes in mobile ad-hoc networks. It ensures highly adaptive and loop-free routing while causing only low processing and traffic cost},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/batman-status.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@incollection{2007_10,
  title       = {Practical and Secure Solutions for Integer Comparison},
  author      = {Garay, Juan and Schoenmakers, Berry and Villegas, Jos{\'e}},
  booktitle   = {Public Key Cryptography -- PKC 2007},
  publisher   = {Springer Berlin Heidelberg},
  series      = {Lecture Notes in Computer Science},
  volume      = {4450},
  year        = {2007},
  pages       = {330--342},
  editor      = {Okamoto, Tatsuaki and Wang, Xiaoyun},
  abstract    = {Yao's classical millionaires' problem is about securely determining whether x > y, given two input values x,y, which are held as private inputs by two parties, respectively. The output x > y becomes known to both parties. In this paper, we consider a variant of Yao's problem in which the inputs x,y as well as the output bit x > y are encrypted. Referring to the framework of secure n-party computation based on threshold homomorphic cryptosystems as put forth by Cramer, Damg{\r a}rd, and Nielsen at Eurocrypt 2001, we develop solutions for integer comparison, which take as input two lists of encrypted bits representing x and y, respectively, and produce an encrypted bit indicating whether x > y as output. Secure integer comparison is an important building block for applications such as secure auctions. In this paper, our focus is on the two-party case, although most of our results extend to the multi-party case. We propose new logarithmic-round and constant-round protocols for this setting, which achieve simultaneously very low communication and computational complexities. We analyze the protocols in detail and show that our solutions compare favorably to other known solutions},
  www_section = {homomorphic encryption, Millionaires' problem, secure multi-party computation},
  isbn        = {978-3-540-71676-1},
  doi         = {10.1007/978-3-540-71677-8_22},
  url         = {http://dx.doi.org/10.1007/978-3-540-71677-8_22},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IntegerComparisonSolution2007Garay.pdf},
}
@mastersthesis{2007_11,
  title       = {Secure asynchronous change notifications for a distributed file system},
  author      = {Amann, Bernhard},
  school      = {Technische Universit{\"a}t M{\"u}nchen},
  volume      = {Computer Science},
  year        = {2007},
  month       = nov,
  address     = {Munich, Germany},
  pages       = {0--74},
  abstract    = {Distributed file systems have been a topic of interest for a long time and there are many file systems that are distributed in one way or another. However most distributed file systems are only reasonably usable within a local network of computers and some main tasks are still delegated to a very small number of servers. Today with the advent of Peer-to-Peer technology, distributed file systems that work on top of Peer-to-Peer systems can be built. These systems can be built with no or much less centralised components and are usable on a global scale. The System Architecture Group at the University of Karlsruhe in Germany has developed such a file system, which is built on top of a structured overlay network and uses Distributed Hash Tables to store and access the information. One problem with this approach is, that each file system can only be accessed with the help of an identifier, which changes whenever a file system is modified. All clients have to be notified of the new identifier in a secure, fast and reliable way. Usually the strategy to solve this type of problem is an encrypted multicast. This thesis presents and analyses several strategies of using multicast distributions to solve this problem and then unveils our final solution based on the Subset Difference method proposed by Naor et al},
  www_section = {distributed file system, distributed hash table, peer-to-peer networking, store information},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf},
}
@inproceedings{2007_12,
  title       = {{SpoVNet}: An Architecture for Supporting Future Internet Applications},
  author      = {Mies, Sebastian},
  booktitle   = {Proc. 7th W{\"u}rzburg Workshop on IP: Joint EuroFGI and ITG Workshop on Visions of Future Generation Networks'},
  year        = {2007},
  address     = {W{\"u}rzburg, Germany},
  abstract    = {This talk presents an approach for providing Spontaneous Virtual Networks (SpoVNets) that enable flexible, adaptive, and spontaneous provisioning of application-oriented and network-oriented services on top of heterogeneous networks. SpoVNets supply new and uniform communication abstrac-tions for future Internet applications so applications can make use of advanced services not supported by today's Internet. We expect that many functions, which are currently provided by SpoVNet on the application layer will become an integral part of future networks. Thus, SpoVNet will transparently use advanced services from the underlying network infrastructure as they become available (e.g., QoS-support in access networks or multicast in certain ISPs), enabling a seamless transition from current to future genera-tion networks without modifying the applications},
  url         = {http://www.tm.uka.de/itm/publications.php?bib=257},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SpoVNet.pdf , https://git.gnunet.org/bibliography.git/plain/docs/Mies\%20-\%20SpoVNet.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@inproceedings{2007_13,
  title       = {An Unconditionally Secure Protocol for Multi-Party Set Intersection},
  author      = {Li, Ronghua and Wu, Chuankun},
  booktitle   = {Proceedings of the 5th International Conference on Applied Cryptography and Network Security},
  publisher   = {Springer-Verlag},
  year        = {2007},
  address     = {Berlin, Heidelberg},
  abstract    = {Existing protocols for private set intersection are based on homomorphic public-key encryption and the technique of representing sets as polynomials in the cryptographic model. Based on the ideas of these protocols and the two-dimensional verifiable secret sharing scheme, we propose a protocol for private set intersection in the information-theoretic model. By representing the sets as polynomials, the set intersection problem is converted into the task of computing the common roots of the polynomials. By sharing the coefficients of the polynomials among parties, the common roots can be computed out using the shares. As long as more than 2n/3 parties are semi-honest, our protocol correctly computes the intersection of n sets, and reveals no other information than what is implied by the intersection and the secret sets controlled by the active adversary. This is the first specific protocol for private set intersection in the information-theoretic model as far as we know},
  www_section = {privacy-preserving set intersection, secure multi-party computation, unconditional security},
  isbn        = {978-3-540-72737-8},
  doi         = {10.1007/978-3-540-72738-5_15},
  url         = {http://dx.doi.org/10.1007/978-3-540-72738-5_15},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecureProtocol2007Li.pdf},
}
@article{2007_14,
  title       = {Vielleicht anonym? Die Enttarnung von StealthNet-Nutzern},
  author      = {Nils Durner and Nathan S Evans and Christian Grothoff},
  journal     = {c't magazin f{\"u}r computer technik},
  year        = {2007},
  type        = {Report},
  www_section = {anonymity, file-sharing, Rshare, Stealthnet},
  url         = {http://www.heise.de/kiosk/archiv/ct/2007/21/218_Die-Enttarnung-von-StealthNet-Nutzern},
}
@phdthesis{2007_2,
  title       = {Cooperative Data Backup for Mobile Devices},
  author      = {Court{\`e}s, Ludovic},
  school      = {Institut National Polytechnique de Toulouse},
  year        = {2007},
  month       = mar,
  abstract    = {Mobile devices such as laptops, PDAs and cell phones are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. However, few mechanisms are available to reduce the risk of losing the data stored on these devices. In this dissertation, we try to address this concern by designing a cooperative backup service for mobile devices. The service leverages encounters and spontaneous interactions among participating devices, such that each device stores data on behalf of other devices. We first provide an analytical evaluation of the dependability gains of the proposed service. Distributed storage mechanisms are explored and evaluated. Security concerns arising from the cooperation among mutually suspicious principals are identified, and core mechanisms are proposed to allow them to be addressed. Finally, we present our prototype implementation of the cooperative backup service},
  www_section = {backup, dependability, P2P, ubiquitous computing},
  url         = {http://ethesis.inp-toulouse.fr/archive/00000544/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/phd-thesis.fr_en.pdf},
}
@article{2007_3,
  title       = {Gossip-based Peer Sampling},
  author      = {Jelasity, M{\'a}rk and Voulgaris, Spyros and Guerraoui, Rachid and Kermarrec, Anne-Marie and van Steen, Maarten},
  journal     = {ACM Trans. Comput. Syst},
  volume      = {25},
  year        = {2007},
  abstract    = {Gossip-based communication protocols are appealing in large-scale distributed applications such as information dissemination, aggregation, and overlay topology management. This paper factors out a fundamental mechanism at the heart of all these protocols: the peer-sampling service. In short, this service provides every node with peers to gossip with. We promote this service to the level of a first-class abstraction of a large-scale distributed system, similar to a name service being a first-class abstraction of a local-area system. We present a generic framework to implement a peer-sampling service in a decentralized manner by constructing and maintaining dynamic unstructured overlays through gossiping membership information itself. Our framework generalizes existing approaches and makes it easy to discover new ones. We use this framework to empirically explore and compare several implementations of the peer sampling service. Through extensive simulation experiments we show that---although all protocols provide a good quality uniform random stream of peers to each node locally---traditional theoretical assumptions about the randomness of the unstructured overlays as a whole do not hold in any of the instances. We also show that different design decisions result in severe differences from the point of view of two crucial aspects: load balancing and fault tolerance. Our simulations are validated by means of a wide-area implementation},
  www_section = {epidemic protocols, Gossip-based protocols, peer sampling service},
  issn        = {0734-2071},
  doi         = {10.1145/1275517.1275520},
  url         = {http://doi.acm.org/10.1145/1275517.1275520},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GossipPeerSampling2007Jelasity.pdf},
}
@article{2007_4,
  title       = {Gossiping in Distributed Systems},
  author      = {Kermarrec, Anne-Marie and van Steen, Maarten},
  journal     = {SIGOPS Oper. Syst. Rev},
  volume      = {41},
  year        = {2007},
  pages       = {2--7},
  abstract    = {Gossip-based algorithms were first introduced for reliably disseminating data in large-scale distributed systems. However, their simplicity, robustness, and flexibility make them attractive for more than just pure data dissemination alone. In particular, gossiping has been applied to data aggregation, overlay maintenance, and resource allocation. Gossiping applications more or less fit the same framework, with often subtle differences in algorithmic details determining divergent emergent behavior. This divergence is often difficult to understand, as formal models have yet to be developed that can capture the full design space of gossiping solutions. In this paper, we present a brief introduction to the field of gossiping in distributed systems, by providing a simple framework and using that framework to describe solutions for various application domains},
  issn        = {0163-5980},
  doi         = {10.1145/1317379.1317381},
  url         = {http://doi.acm.org/10.1145/1317379.1317381},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Gossiping2007Kermarrrec.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@book{2007_5,
  title       = {The Iterated Prisoner's Dilemma: 20 Years On},
  author      = {Kendall, Graham and Yao, Xin and Ching, Siang Yew},
  publisher   = {World Scientific Publishing Co. Pte. Ltd},
  series      = {Advances in Natural Computation},
  volume      = {4},
  year        = {2007},
  address     = {Singapore},
  pages       = {0--262},
  abstract    = {In 1984, Robert Axelrod published a book, relating the story of two competitions which he ran, where invited academics entered strategies for "The Iterated Prisoners' Dilemma". The book, almost 20 years on, is still widely read and cited by academics and the general public. As a celebration of that landmark work, we have recreated those competitions to celebrate its 20th anniversary, by again inviting academics to submit prisoners' dilemma strategies. The first of these new competitions was run in July 2004, and the second in April 2005. "Iterated Prisoners' Dilemma: 20 Years On essentially" provides an update of the Axelrod's book. Specifically, it presents the prisoners' dilemma, its history and variants; highlights original Axelrod's work and its impact; discusses results of new competitions; and, showcases selected papers that reflect the latest researches in the area},
  www_section = {dilemma, iterated prisoners, landmark work},
  isbn        = {978-981-270-697-3},
}
@article{2007_6,
  title       = {Mapping an Arbitrary Message to an Elliptic Curve when Defined over {GF}$(2^n)$},
  author      = {King, Brian},
  journal     = {International Journal of Network Security},
  volume      = {8},
  year        = {2007},
  month       = mar,
  pages       = {169--176},
  abstract    = {The use of elliptic curve cryptography (ECC) when used as a public-key cryptosystem for encryption is such that if one has a message to encrypt, then they attempt to map it to some point in the prime subgroup of the elliptic curve by systematically modifying the message in a deterministic manner. The applications typically used for ECC are the key-exchange, digital signature or a hybrid encryption systems (ECIES) all of which avoid this problem. In this paper we provide a deterministic method that guarantees that the map of a message to an elliptic curve point can be made without any modification. This paper provides a solution to the open problem posed in [7] concerning the creation of a deterministic method to map arbitrary message to an elliptic curve},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ijns-2009-v8-n2-p169-176.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@incollection{2007_7,
  title       = {Multiparty Computation for Interval, Equality, and Comparison Without Bit-Decomposition Protocol},
  author      = {Nishide, Takashi and Ohta, Kazuo},
  booktitle   = {Public Key Cryptography -- PKC 2007},
  publisher   = {Springer Berlin Heidelberg},
  series      = {Lecture Notes in Computer Science},
  volume      = {4450},
  year        = {2007},
  pages       = {343--360},
  editor      = {Okamoto, Tatsuaki and Wang, Xiaoyun},
  abstract    = {Damg{\r a}rd et al. [11] showed a novel technique to convert a polynomial sharing of secret a into the sharings of the bits of a in constant rounds, which is called the bit-decomposition protocol. The bit-decomposition protocol is a very powerful tool because it enables bit-oriented operations even if shared secrets are given as elements in the field. However, the bit-decomposition protocol is relatively expensive. In this paper, we present a simplified bit-decomposition protocol by analyzing the original protocol. Moreover, we construct more efficient protocols for a comparison, interval test and equality test of shared secrets without relying on the bit-decomposition protocol though it seems essential to such bit-oriented operations. The key idea is that we do computation on secret a with c and r where c = a + r, c is a revealed value, and r is a random bitwise-shared secret. The outputs of these protocols are also shared without being revealed. The realized protocols as well as the original protocol are constant-round and run with less communication rounds and less data communication than those of [11]. For example, the round complexities are reduced by a factor of approximately 3 to 10},
  www_section = {Bitwise Sharing, Multiparty Computation, secret sharing},
  isbn        = {978-3-540-71676-1},
  doi         = {10.1007/978-3-540-71677-8_23},
  url         = {http://dx.doi.org/10.1007/978-3-540-71677-8_23},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2007Nishide.pdf},
}
@inproceedings{2007_8,
  title       = {A New Efficient Privacy-preserving Scalar Product Protocol},
  author      = {Amirbekyan, Artak and Estivill-Castro, Vladimir},
  booktitle   = {Proceedings of the Sixth Australasian Conference on Data Mining and Analytics--Volume 70},
  publisher   = {Australian Computer Society, Inc},
  year        = {2007},
  address     = {Darlinghurst, Australia},
  abstract    = {Recently, privacy issues have become important in data analysis, especially when data is horizontally partitioned over several parties. In data mining, the data is typically represented as attribute-vectors and, for many applications, the scalar (dot) product is one of the fundamental operations that is repeatedly used. In privacy-preserving data mining, data is distributed across several parties. The efficiency of secure scalar products is important, not only because they can cause overhead in communication cost, but dot product operations also serve as one of the basic building blocks for many other secure protocols. Although several solutions exist in the relevant literature for this problem, the need for more efficient and more practical solutions still remains. In this paper, we present a very efficient and very practical secure scalar product protocol. We compare it to the most common scalar product protocols. We not only show that our protocol is much more efficient than the existing ones, we also provide experimental results by using a real life dataset},
  www_section = {privacy preserving data mining},
  isbn        = {978-1-920682-51-4},
  url         = {http://dl.acm.org/citation.cfm?id=1378245.1378274},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2007Ambirbekyan.pdf},
}
@inproceedings{2007_9,
  title       = {{ParaNets}: A Parallel Network Architecture for Challenged Networks},
  author      = {Harras, Khaled A. and Wittie, Mike P. and Almeroth, Kevin C. and Belding, Elizabeth M.},
  booktitle   = {Proceedings of the Eighth IEEE Workshop on Mobile Computing Systems and Applications (HotMobile 2007)},
  year        = {2007},
  month       = mar,
  abstract    = {Networks characterized by challenges, such as intermittent connectivity, network heterogeneity, and large delays, are called "challenged networks". We propose a novel network architecture for challenged networks dubbed Parallel Networks, or, ParaNets. The vision behind ParaNets is to have challenged network protocols operate over multiple heterogenous networks, simultaneously available, through one or more devices. We present the ParaNets architecture and discuss its short-term challenges and longterm implications. We also argue, based on current research trends and the ParaNets architecture, for the evolution of the conventional protocol stack to a more flexible cross-layered protocol tree. To demonstrate the potential impact of ParaNets, we use Delay Tolerant Mobile Networks (DTMNs) as a representative challenged network over which we evaluate ParaNets. Our ultimate goal in this paper is to open the way for further work in challenged networks using ParaNets as the underlying architecture},
  isbn        = {978-0-7695-3001-7},
  url         = {http://ieeexplore.ieee.org/Xplore/login.jsp?reload=true\&url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4389542\%2F4389543\%2F04389561.pdf\%3Farnumber\%3D4389561\&authDecision=-203},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hotmobile07.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@inproceedings{2008_0,
  title       = {{AmbiComp}: A platform for distributed execution of {Java} programs on embedded systems by offering a single system image},
  author      = {Eickhold, Johannes and Fuhrmann, Thomas and Saballus, Bjoern and Schlender, Sven and Suchy, Thomas},
  booktitle   = {AmI-Blocks'08, European Conference on Ambient Intelligence 2008},
  year        = {2008},
  month       = jan,
  abstract    = {Ambient Intelligence pursues the vision that small networked computers will jointly perform tasks that create the illusion of an intelligent environment. One of the most pressing challenges in this context is the question how one could easily develop software for such highly complex, but resource-scarce systems. In this paper we present a snapshot of our ongoing work towards facilitating software development for Ambient Intelligence systems. In particular, we present the AmbiComp [1] platform. It consists of small, modular hardware, a flexible firmware including a Java Virtual Machine, and an Eclipse-based integrated development environment},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/publ_2008_eickhold-fuhrmann-saballus-ua_ambicomp.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@inproceedings{2008_1,
  title       = {Analyzing {Unreal Tournament 2004} Network Traffic Characteristics},
  author      = {H{\"u}bsch, Christian},
  booktitle   = {CGAT'08 Singapore, 28th-30th},
  year        = {2008},
  abstract    = {With increasing availability of high-speed access links in the private sector, online real-time gaming has become a major and still growing segment in terms of market and network impact today. One of the most popular games is Unreal Tournament 2004, a fast-paced action game that still ranks within the top 10 of the most-played multiplayer Internet-games, according to GameSpy [1]. Besides high demands in terms of graphical computation, games like Unreal also impose hard requirements regarding network packet delay and jitter, for small deterioration in these conditions influences gameplay recognizably. To make matters worse, such games generate a very specific network traffic with strong requirements in terms of data delivery. In this paper, we analyze the network traffic characteristics of Unreal Tournament 2004. The experiments include different aspects like variation of map sizes, player count, player behavior as well as hardware and game-specific configuration. We show how different operating systems influence network behavior of the game. Our work gives a promising picture of how the specific real-time game behaves in terms of network impact and may be used as a basis e.g. for the development of specialized traffic generators},
  url         = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=295},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@inproceedings{2008_10,
  title       = {Robust De-anonymization of Large Sparse Datasets},
  author      = {Narayanan, Arvind and Shmatikov, Vitaly},
  booktitle   = {Proceedings of the 2008 IEEE Symposium on Security and Privacy},
  publisher   = {IEEE Computer Society},
  year        = {2008},
  address     = {Washington, DC, USA},
  abstract    = {We present a new class of statistical deanonymization attacks against high-dimensional micro-data, such as individual preferences, recommendations, transaction records and so on. Our techniques are robust to perturbation in the data and tolerate some mistakes in the adversary's background knowledge. We apply our de-anonymization methodology to the Netflix Prize dataset, which contains anonymous movie ratings of 500,000 subscribers of Netflix, the world's largest online movie rental service. We demonstrate that an adversary who knows only a little bit about an individual subscriber can easily identify this subscriber's record in the dataset. Using the Internet Movie Database as the source of background knowledge, we successfully identified the Netflix records of known users, uncovering their apparent political preferences and other potentially sensitive information},
  www_section = {anonymity, attack, privacy},
  isbn        = {978-0-7695-3168-7},
  doi         = {10.1109/SP.2008.33},
  url         = {http://dx.doi.org/10.1109/SP.2008.33},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Deanonymization2008narayanan.pdf},
}
@booklet{2008_11,
  title       = {The Spontaneous Virtual Networks Architecture for Supporting Future Internet Services and Applications},
  author      = {Roland Bless and Oliver Waldhorst and Mayer, Christoph P.},
  year        = {2008},
  publisher   = {NEC, Heidelberg},
  note        = {Vortrag auf dem Fachgespr{\"a}ch der GI/ITG-Fachgruppe {\textquoteleft}{\textquoteleft}Kommunikation und Verteilte Systeme'' Future Internet},
%%%%% ERROR: Missing field
% www_section = {?????},
}
@conference{2008_12,
  title = {Towards Empirical Aspects of Secure Scalar Product},
  author = {I-Cheng Wang and Chih-Hao Shen and Tsan-sheng Hsu and Churn-Chung Liao and Da-Wei Wang and Zhan, J.},
  booktitle = {Information Security and Assurance, 2008. ISA 2008. International Conference on},
  year = {2008},
  month = {April},
  abstract = {Privacy is ultimately important, and there is a fair amount of research about it. However, few empirical studies about the cost of privacy are conducted. In the area of secure multiparty computation, the scalar product has long been reckoned as one of the most promising building blocks in place of the classic logic gates. The reason is not only the scalar product complete, which is as good as logic gates, but also the scalar product is much more efficient than logic gates. As a result, we set to study the computation and communication resources needed for some of the most well-known and frequently referred secure scalar-product protocols, including the composite-residuosity, the invertible-matrix, the polynomial-sharing, and the commodity-based approaches. Besides the implementation remarks of these approaches, we analyze and compare their execution time, computation time, and random number consumption, which are the most concerned resources when talking about secure protocols. 
Moreover, Fairplay the benchmark approach implementing Yao's famous circuit evaluation protocol, is included in our experiments in order to demonstrate the potential for the scalar product to replace logic gates},
  www_section = {circuit evaluation protocol, Circuits, commodity-based, composite residuosity, composite-residuosity, Computational efficiency, Costs, data privacy, empirical survey, Information science, information security, invertible-matrix, logic gates, polynomial-sharing, Polynomials, privacy, Proposals, protocols, scalar-product, secure multiparty computation, secure protocols, Secure scalar product, secure scalar-product protocols},
  doi = {10.1109/ISA.2008.78},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EmpiricalAspects2009Wang.pdf},
}
@conference{2008_13,
  title = {The Underlay Abstraction in the Spontaneous Virtual Networks (SpoVNet) Architecture},
  author = {Roland Bless and H{\"u}bsch, Christian and Sebastian Mies and Oliver Waldhorst},
  booktitle = {Proc. 4th EuroNGI Conf. on Next Generation Internet Networks (NGI 2008)},
  year = {2008},
  address = {Krakow, Poland},
  pages = {115--122},
  abstract = {Next generation networks will combine many heterogeneous access technologies to provide services to a large number of highly mobile users while meeting their demands for quality of service, robustness, and security. Obviously, this is not a trivial task and many protocols fulfilling some combination of these requirements have been proposed. However, non of the current proposals meets all requirements, and the deployment of new applications and services is hindered by a patchwork of protocols. This paper presents Spontaneous Virtual Networks (SpoVNet), an architecture that fosters the creation of new applications and services for next generation networks by providing an underlay abstraction layer. This layer applies an overlay-based approach to cope with mobility, multi-homing, and heterogeneity. For coping with network mobility, it uses a SpoVNet-specific addressing scheme, splitting node identifiers from network locators and providing persistent connections by transparently switching locators. To deal with multihoming it transparently chooses the most appropriate pair of network locators for each connection. To cope with network and protocol heterogeneity, it uses dedicated overlay nodes, e.g., for relaying between IPv4 and IPv6 hosts},
  www_section = {heterogeneity, robustness},
  url = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=283},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/underlayabs-ngi08-final.pdf},
}
@article{2008_14,
  title = {Unerkannt. Anonymisierende Peer-to-Peer-Netze im {\"U}berblick},
  author = {Nils Durner and Nathan S Evans and Christian Grothoff},
  journal = {iX magazin f{\"u}r professionelle informationstechnik},
  year = {2008},
  type = {Survey},
  url = {http://www.heise.de/kiosk/archiv/ix/2008/9/88_Anonyme-Peer-to-Peer-Netze-im-Ueberblick},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@article{2008_15,
  title = {What Can We Learn Privately?},
  author = {Shiva Prasad Kasiviswanathan and Homin K. Lee and Kobbi Nissim and Sofya Raskhodnikova and Adam Smith},
  journal = {CoRR},
  volume = {abs/0803.0924},
  year = {2008},
  abstract = {Learning problems form an important category of computational tasks that generalizes many of the computations researchers apply to large real-life data sets. We ask: what concept classes can be learned privately, namely, by an algorithm whose output does not depend too heavily on any one input or specific training example? More precisely, we investigate learning algorithms that satisfy differential privacy, a notion that provides strong confidentiality guarantees in contexts where aggregate information is released about a database containing sensitive information about individuals. We demonstrate that, ignoring computational constraints, it is possible to privately agnostically learn any concept class using a sample size approximately logarithmic in the cardinality of the concept class. Therefore, almost anything learnable is learnable privately: specifically, if a concept class is learnable by a (non-private) algorithm with polynomial sample complexity and output size, then it can be learned privately using a polynomial number of samples. We also present a computationally efficient private PAC learner for the class of parity functions. Local (or randomized response) algorithms are a practical class of private algorithms that have received extensive investigation. We provide a precise characterization of local private learning algorithms. We show that a concept class is learnable by a local algorithm if and only if it is learnable in the statistical query (SQ) model. 
Finally, we present a separation between the power of interactive and noninteractive local learning algorithms},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WhatCanWeLearnPrivately2008Kasiviswanthan.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@conference{2008_2,
  title = {Consistency Management for Peer-to-Peer-based Massively Multiuser Virtual Environments},
  author = {Gregor Schiele and Richard S{\"u}selbeck and Arno Wacker and Triebel, Tonio and Christian Becker},
  booktitle = {Proc. 1st Int.Workshop on Massively Multiuser Virtual Environments (MMVE'08)},
  year = {2008},
  url = {http://www.spovnet.de/files/publications/MMVEConsistency.pdf/view},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MMVEConsistency.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@phdthesis{2008_3,
  title = {The Decentralized File System Igor-FS as an Application for Overlay-Networks},
  author = {unknown},
  school = {Universit{\"a}t Fridericiana (TH)},
  volume = {Engineering},
  year = {2008},
  month = {February},
  address = {Karlsruhe, Germany},
  pages = {0--193},
  type = {Doctoral},
  abstract = {Working in distributed systems is part of the information society. More and more people and organizations work with growing data volumes. Often, part of the problem is to access large files in a share way. Until now, there are two often used approaches to allow this kind off access. Either the files are tranfered via FTP, e-mail or similar medium before the access happens, or a centralized server provides file services. The first alternative has the disadvantage that the entire file has to be transfered before the first access can be successful. If only small parts in the file have been changed compared to a previous version, the entire file has to be transfered anyway. The centralized approach has disadvantages regarding scalability and reliability. In both approaches authorization and authentication can be difficult in case users are seperated by untrusted network segements},
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000009668},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kutzner\%20-\%20The\%20descentralized\%20file\%20system\%20Igor-FS\%20as\%20an\%20application_0.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@incollection{2008_4,
  title = {Estimating The Size Of Peer-To-Peer Networks Using Lambert's W Function},
  author = {Javier Bustos-Jim{\'e}nez and Nicol{\'a}s Bersano and Satu Elisa Schaeffer and Jos{\'e} Miguel Piquer and Alexandru Iosup and Augusto Ciuffoletti},
  booktitle = {Grid Computing--Achievements and Prospects},
  organization = {Springer-Verlag},
  year = {2008},
  address = {New York, NY, USA},
  pages = {61--72},
  publisher = {Springer-Verlag},
  abstract = {In this work, we address the problem of locally estimating the size of a Peer-to-Peer (P2P) network using local information. We present a novel approach for estimating the size of a peer-to-peer (P2P) network, fitting the sum of new neighbors discovered at each iteration of a breadth-first search (BFS) with a logarithmic function, and then using Lambert's W function to solve a root of a ln(n) + b--n = 0, where n is the network size. With rather little computation, we reach an estimation error of at most 10 percent, only allowing the BFS to iterate to the third level},
  www_section = {distributed computing, lambert w function, network size estimation, peer-to-peer networking},
  isbn = {978-0-387-09456-4},
  url = {http://eprints.adm.unipi.it/649/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Grid\%20Computing\%20-\%20Estimating\%20The\%20Size\%20Of\%20Peer-To-Peer\%20Networks.pdf},
}
@article{2008_5,
  title = {On the False-positive Rate of Bloom Filters},
  author = {Bose, Prosenjit and Guo, Hua and Kranakis, Evangelos and Maheshwari, Anil and Morin, Pat and Morrison, Jason and Smid, Michiel and Tang, Yihui},
  journal = {Inf. Process. Lett},
  volume = {108},
  year = {2008},
  pages = {210--213},
  abstract = {Bloom filters are a randomized data structure for membership queries dating back to 1970. Bloom filters sometimes give erroneous answers to queries, called false positives. Bloom analyzed the probability of such erroneous answers, called the false-positive rate, and Bloom's analysis has appeared in many publications throughout the years. We show that Bloom's analysis is incorrect and give a correct analysis},
  www_section = {Analysis of algorithms, data structures},
  issn = {0020-0190},
  doi = {10.1016/j.ipl.2008.05.018},
  url = {http://dx.doi.org/10.1016/j.ipl.2008.05.018},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FalsepositiverateBloomFilter2008Bose.pdf},
}
@conference{2008_6,
  title = {Higher Confidence in Event Correlation Using Uncertainty Restrictions},
  author = {Gerald G. Koch and Boris Koldehofe and Kurt Rothermel},
  booktitle = {28th International Conference on In Distributed Computing Systems Workshops},
  year = {2008},
  abstract = {Distributed cooperative systems that use event notification for communication can benefit from event correlation within the notification network. In the presence of uncertain data, however, correlation results easily become unreliable. The handling of uncertainty is therefore an important challenge for event correlation in distributed event notification systems. In this paper, we present a generic correlation model that is aware of uncertainty. We propose uncertainty constraints that event correlation can take into account and show how they can lead to higher confidence in the correlation result. We demonstrate that the application of this model allows to obtain a qualitative description of event correlation},
  url = {http://www.citeulike.org/user/nmsx/article/4505416},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koch08confidence.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@conference{2008_7,
  title = {Improving User and ISP Experience through ISP-aided P2P Locality},
  author = {Vinay Aggarwal and Obi Akonjang and Feldmann, Anja},
  booktitle = {GI'08. Proceedings of 11th IEEE Global Internet Symposium 2008},
  organization = {IEEE Computer Society},
  year = {2008},
  month = {April},
  address = {Phoenix, AZ},
  publisher = {IEEE Computer Society},
  abstract = {Despite recent improvements, P2P systems are still plagued by fundamental issues such as overlay/underlay topological and routing mismatch, which affects their performance and causes traffic strains on the ISPs. In this work, we aim to improve overall system performance for ISPs as well as P2P systems by means of traffic localization through improved collaboration between ISPs and P2P systems. More specifically, we study the effects of different ISP/P2P topologies as well as a broad range of influential user behavior characteristics, namely content availability, churn, and query patterns, on end-user and ISP experience. We show that ISP-aided P2P locality benefits both P2P users and ISPs, measured in terms of improved content download times, increased network locality of query responses and desired content, and overall reduction in P2P traffic},
  www_section = {isp, P2P},
  isbn = {978-1-4244-2219-7},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/isp-aidedp2p.PDF},
}
@techreport{2008_8,
  title = {Privacy guarantees through distributed constraint satisfaction},
  author = {Boi Faltings and Thomas Leaute and Adrian Petcu},
  institution = {Swiss Federal Institute of Technology (EPFL)},
  number = {12},
  year = {2008},
  month = {April},
  address = {Lausanne, Switzerland},
  type = {Tech report},
  abstract = {Abstract. In Distributed Constraint Satisfaction Problems, agents often desire to find a solution while revealing as little as possible about their variables and constraints. So far, most algorithms for DisCSP do not guarantee privacy of this information. This paper describes some simple obfuscation techniques that can be used with DisCSP algorithms such as DPOP, and provide sensible privacy guarantees based on the distributed solving process without sacrificing its efficiency},
  www_section = {algorithms, DisCSP algorithm, distributed constraint satisfaction, optimization, privacy, SMC},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Privacy\%20guarantees\%20through\%20DCS.pdf},
}
@article{2008_9,
  title = {Progressive Strategies for Monte-Carlo Tree Search},
  author = {Guillaume M. J-B. Chaslot and Mark H. M. Winands and H. Jaap van den Herik and Jos W. H. M. Uiterwijk and Bruno Bouzy},
  journal = {New Mathematics and Natural Computation},
  volume = {4},
  year = {2008},
  pages = {343--357},
  abstract = {Monte-Carlo Tree Search (MCTS) is a new best-first search guided by the results of Monte-Carlo simulations. In this article, we introduce two progressive strategies for MCTS, called progressive bias and progressive unpruning. They enable the use of relatively time-expensive heuristic knowledge without speed reduction. Progressive bias directs the search according to heuristic knowledge. Progressive unpruning first reduces the branching factor, and then increases it gradually again. Experiments assess that the two progressive strategies significantly improve the level of our Go program Mango. Moreover, we see that the combination of both strategies performs even better on larger board sizes},
  www_section = {computer go, MCTS heuristic search, Monte-Carlo Tree Search},
  doi = {10.1142/S1793005708001094},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NMNC\%20-\%20Progressive\%20strategies\%20for\%20MCTS.pdf},
}
@article{2009_0,
  title = {Brahms: Byzantine Resilient Random Membership Sampling},
  author = {Edward Bortnikov and Maxim Gurevich and Idit Keidar and Gabriel Kliot and Alexander Shraer},
  journal = {Computer Networks Journal (COMNET), Special Issue on Gossiping in Distributed Systems},
  year = {2009},
  month = {April},
  www_section = {Byzantine Resilient Sampling, Random Membership, random sampling},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brahms-Comnet-Mar09.pdf , https://git.gnunet.org/bibliography.git/plain/docs/Brahms-rps-mar09.pdf},
  url = {https://bibliography.gnunet.org},
}
@conference{2009_1,
  title = {CLIO/UNISONO: practical distributed and overlay- wide network measurement},
  author = {Ralph Holz and Dirk Haage},
  booktitle = {CLIO/UNISONO: practical distributed and overlay-wide network measurement},
  year = {2009},
  abstract = {Building on previous work, we present an early version of our CLIO/UNISONO framework for distributed network measurements. CLIO/UNISONO is a generic measurement framework specifically aimed at overlays that need measurements for optimization purposes. In this talk, we briefly introduce the most important concepts and then focus on some more advanced mechanisms like measurements across connectivity domains and remote orders},
  url = {https://bibliography.gnunet.org},
  www_section = {Unsorted},
}
@conference{2009_10,
  title = {Peer Profiling and Selection in the I2P Anonymous Network},
  author = {Lars Schimmer},
  booktitle = {PET-CON 2009.1},
  year = {2009},
  month = {March},
  address = {TU Dresden, Germany},
  www_section = {I2P},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/I2P-PET-CON-2009.1.pdf},
  url = {https://bibliography.gnunet.org},
}
@conference{2009_11,
  title = {Privacy Integrated Queries: An Extensible Platform for Privacy-preserving Data Analysis},
  author = {McSherry, Frank D.},
  booktitle = {Proceedings of the 2009 ACM SIGMOD International Conference on Management of Data},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  publisher = {ACM},
  abstract = {We report on the design and implementation of the Privacy Integrated Queries (PINQ) platform for privacy-preserving data analysis. PINQ provides analysts with a programming interface to unscrubbed data through a SQL-like language. At the same time, the design of PINQ's analysis language and its careful implementation provide formal guarantees of differential privacy for any and all uses of the platform. PINQ's unconditional structural guarantees require no trust placed in the expertise or diligence of the analysts, substantially broadening the scope for design and deployment of privacy-preserving data analysis, especially by non-experts},
  www_section = {anonymization, confidentiality, Differential Privacy, linq},
  isbn = {978-1-60558-551-2},
  doi = {10.1145/1559845.1559850},
  url = {http://doi.acm.org/10.1145/1559845.1559850},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyIntergratedQueries2009McSherry.pdf},
}
@article{2009_12,
  title = {Robust Random Number Generation for Peer-to-Peer Systems},
  author = {Awerbuch, Baruch and Scheideler, Christian},
  journal = {Theor. Comput. Sci},
  volume = {410},
  year = {2009},
  pages = {453--466},
  abstract = {We consider the problem of designing an efficient and robust distributed random number generator for peer-to-peer systems that is easy to implement and works even if all communication channels are public. A robust random number generator is crucial for avoiding adversarial join-leave attacks on peer-to-peer overlay networks. We show that our new generator together with a light-weight rule recently proposed in [B. Awerbuch, C. Scheideler, Towards a scalable and robust DHT, in: Proc. of the 18th ACM Symp. on Parallel Algorithms and Architectures, SPAA, 2006. See also http://www14.in.tum.de/personen/scheideler] for keeping peers well distributed can keep various structured overlay networks in a robust state even under a constant fraction of adversarial peers},
  www_section = {Join-leave attacks, Peer-to-peer systems, Random number generation},
  issn = {0304-3975},
  doi = {10.1016/j.tcs.2008.10.003},
  url = {http://dx.doi.org/10.1016/j.tcs.2008.10.003},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OPODIS-116b.pdf},
}
@conference{2009_13,
  title = {Security and Privacy Challenges in the Internet of Things},
  author = {Mayer, Christoph P.},
  booktitle = {Proceedings of KiVS Workshop on Global Sensor Networks (GSN09)},
  year = {2009},
  note = {http://eceasst.cs.tu-berlin.de/index.php/eceasst/article/download/208/205},
  abstract = {The future Internet of Things as an intelligent collaboration of miniaturized sensors poses new challenges to security and end-user privacy. The ITU has identified that the protection of data and privacy of users is one of the key challenges in the Internet of Things [Int05]: lack of confidence about privacy will result in decreased adoption among users and therefore is one of the driving factors in the success of the Internet of Things. This paper gives an overview, categorization, and analysis of security and privacy challenges in the Internet of Things},
  url = {http://doc.tm.uka.de/2009/security-gsn-camera-ready.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gsn09-security-mayer.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@incollection{2009_14,
  title = {Self-organized Data Redundancy Management for Peer-to-Peer Storage Systems},
  author = {Yaser Houri and Manfred Jobmann and Thomas Fuhrmann},
  booktitle = {Self-Organizing Systems},
  volume = {5918},
  year = {2009},
  pages = {65--76},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {In peer-to-peer storage systems, peers can freely join and leave the system at any time. Ensuring high data availability in such an environment is a challenging task. In this paper we analyze the costs of achieving data availability in fully decentralized peer-to-peer systems. We mainly address the problem of churn and what effect maintaining availability has on network bandwidth. We discuss two different redundancy techniques -- replication and erasure coding -- and consider their monitoring and repairing costs analytically. We calculate the bandwidth costs using basic costs equations and two different Markov reward models. One for centralized monitoring system and the other for distributed monitoring. We show a comparison of the numerical results accordingly. Depending on these results, we determine the best redundancy and maintenance strategy that corresponds to peer's failure probability},
  www_section = {distributed storage, Markov chain},
  isbn = {978-3-642-10864-8},
  doi = {10.1007/978-3-642-10865-5},
  url = {http://www.springerlink.com/content/28660w27373vh408/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext3.pdf},
}
@conference{2009_15,
  title = {ShadowWalker: Peer-to-peer Anonymous Communication Using Redundant Structured Topologies},
  author = {Mittal, Prateek and Borisov, Nikita},
  booktitle = {Proceedings of the 16th ACM Conference on Computer and Communications Security},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  publisher = {ACM},
  abstract = {Peer-to-peer approaches to anonymous communication promise to eliminate the scalability concerns and central vulnerability points of current networks such as Tor. However, the P2P setting introduces many new opportunities for attack, and previous designs do not provide an adequate level of anonymity. We propose ShadowWalker: a new low-latency P2P anonymous communication system, based on a random walk over a redundant structured topology. We base our design on shadows that redundantly check and certify neighbor information; these certifications enable nodes to perform random walks over the structured topology while avoiding route capture and other attacks. We analytically calculate the anonymity provided by ShadowWalker and show that it performs well for moderate levels of attackers, and is much better than the state of the art. We also design an extension that improves forwarding performance at a slight anonymity cost, while at the same time protecting against selective DoS attacks. We show that our system has manageable overhead and can handle moderate churn, making it an attractive new design for P2P anonymous communication},
  www_section = {anonymity, peer-to-peer, random walks},
  isbn = {978-1-60558-894-0},
  doi = {10.1145/1653662.1653683},
  url = {http://doi.acm.org/10.1145/1653662.1653683},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shadowwalker-ccs09.pdf},
}
@article{2009_16,
  title = {A Software and Hardware IPTV Architecture for Scalable DVB Distribution},
  author = {Georg Acher and Detlef Fliegl and Thomas Fuhrmann},
  journal = {International Journal of Digital Multimedia Broadcasting},
  volume = {2009},
  year = {2009},
  abstract = {Many standards and even more proprietary technologies deal with IP-based television (IPTV). But none of them can transparently map popular public broadcast services such as DVB or ATSC to IPTV with acceptable effort. In this paper we explain why we believe that such a mapping using a light weight framework is an important step towards all-IP multimedia. We then present the NetCeiver architecture: it is based on well-known standards such as IPv6, and it allows zero configuration. The use of multicast streaming makes NetCeiver highly scalable. We also describe a low cost FPGA implementation of the proposed NetCeiver architecture, which can concurrently stream services from up to six full transponders},
  www_section = {DVB, IPTV, multicast},
  doi = {10.1155/2009/617203},
  url = {http://www.hindawi.com/journals/ijdmb/2009/617203.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/617203.pdf},
}
@phdthesis{2009_17,
  title = {Solving very large distributed constraint satisfaction problems},
  author = {Peter Harvey},
  school = {University of Wollongong, New South Wales, Australia},
  volume = {Doctor of Philosophy},
  year = {2009},
  month = {December},
  address = {Wollongong, New South Wales, Australia},
  pages = {0--211},
  type = {PhD},
  abstract = {This thesis investigates issues with existing approaches to distributed constraint satisfaction, and proposes a solution in the form of a new algorithm. These issues are most evident when solving large distributed constraint satisfaction problems, hence the title of the thesis. We will first survey existing algorithms for centralised constraint satisfaction, and describe how they have been modified to handle distributed constraint satisfaction. The method by which each algorithm achieves completeness will be investigated and analysed by application of a new theorem. We will then present a new algorithm, Support-Based Distributed Search, developed explicitly for distributed constraint satisfaction rather than being derived from centralised algorithms. This algorithm is inspired by the inherent structure of human arguments and similar mechanisms we observe in real-world negotiations. A number of modifications to this new algorithm are considered, and comparisons are made with existing algorithms, effectively demonstrating its place within the field. Empirical analysis is then conducted, and comparisons are made to state-of-the-art algorithms most able to handle large distributed constraint satisfaction problems. Finally, it is argued that any future development in distributed constraint satisfaction will necessitate changes in the algorithms used to solve small {\textquoteleft}embedded' constraint satisfaction problems. 
The impact on embedded constraint satisfaction problems is considered, with a brief presentation of an improved algorithm for hypertree decomposition},
  www_section = {algorithms, distributed constraint satisfaction},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20P.Harvey.pdf},
}
@conference{2009_18,
  title = {SPINE: Adaptive Publish/Subscribe for Wireless Mesh Networks},
  author = {Jorge Alfonso Briones-Garc{\'\i}a and Boris Koldehofe and Kurt Rothermel},
  booktitle = {Proc of the 8th IEEE International Conference on Innovative Internet Community Systems (I2CS 2008)},
  year = {2009},
  abstract = {Application deployment on Wireless Mesh Networks (WMNs) is a challenging issue. First it requires communication abstractions that allow for interoperation with Internet applications and second the offered solution should be sensitive to the available resources in the underlying network. Loosely coupled communication abstractions, like publish/subscribe, promote interoperability, but unfortunately are typically implemented at the application layer without considering the available resources at the underlay imposing a significant degradation of application performance in the setting of Wireless Mesh Networks. In this paper we present SPINE, a content-based publish/subscribe system, which considers the particular challenges of deploying application-level services in Wireless Mesh Networks. SPINE is designed to reduce the overhead which stems from both publications and reconfigurations, to cope with the inherent capacity limitations on communication links as well as with mobility of the wireless mesh-clients. We demonstrate the effectiveness of SPINE by comparison with traditional approaches in implementing content-based publish/subscribe},
  www_section = {mesh networks, publish/subscribe},
  url = {http://studia.complexica.net/index.php?option=com_content\&view=article\&id=116\%3Aspine--adaptive-publishsubscribe-for-wireless-mesh-networks-pp-320-353\&catid=47\%3Anumber-3\&Itemid=89\&lang=fr},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RI070302.pdf},
}
@booklet{2009_19,
  title = {SpoVNet Security Task Force Report},
  author = {Ralph Holz and Mayer, Christoph P. and Sebastian Mies and Heiko Niedermayer and Tariq, Muhammad Adnan},
  issn = {1613-849X},
  number = {TM-2009-3},
  year = {2009},
  publisher = {Institute of Telematics, Universit{\"a}t Karlsruhe (TH)},
  type = {Telematics Technical Report},
  url = {http://doc.tm.uka.de/2009/TM-2009-3.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TM-2009-3.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@conference{2009_2,
  title = {A Collusion-Resistant Distributed Scalar Product Protocol with Application to Privacy-Preserving Computation of Trust},
  author = {Melchor, C.A. and Ait-Salem, B. and Gaborit, P.},
  booktitle = {Network Computing and Applications, 2009. NCA 2009. Eighth IEEE International Symposium on},
  year = {2009},
  month = {July},
  abstract = {Private scalar product protocols have proved to be interesting in various applications such as data mining, data integration, trust computing, etc. In 2007, Yao et al. proposed a distributed scalar product protocol with application to privacy-preserving computation of trust [1]. This protocol is split in two phases: an homorphic encryption computation; and a private multi-party summation protocol. The summation protocol has two drawbacks: first, it generates a non-negligible communication overhead; and second, it introduces a security flaw. The contribution of this present paper is two-fold. We first prove that the protocol of [1] is not secure in the semi-honest model by showing that it is not resistant to collusion attacks and we give an example of a collusion attack, with only four participants. Second, we propose to use a superposed sending round as an alternative to the multi-party summation protocol, which results in better security properties and in a reduction of the communication costs. 
In particular, regarding security, we show that the previous scheme was vulnerable to collusions of three users whereas in our proposal we can set t in [1..n--1] and define a protocol resisting to collusions of up to t users},
  www_section = {collaboration, collusion-resistant distributed protocol, Computer applications, computer networks, cryptographic protocols, cryptography, data privacy, distributed computing, homorphic encryption computation, Laboratories, Portable media players, privacy-preserving computation, Privacy-preserving computation of trust, private multiparty summation protocol, scalar product protocol, secure multi-party computation, Secure scalar product, security, Superposed sending., Telephony, trust computation},
  doi = {10.1109/NCA.2009.48},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CollusionResistant2009Melchor.pdf},
  url = {https://bibliography.gnunet.org},
}
@conference{2009_20, title = {Towards End-to-End Connectivity for Overlays across Heterogeneous Networks}, author = {Sebastian Mies and Oliver Waldhorst and Hans Wippel}, booktitle = {Proc. Int. Workshop on the Network of the Future (Future-Net 2009), co-located with IEEE Int. Conf. on Communications (ICC 2009)}, year = {2009}, address = {Dresden, Germany}, abstract = {The incremental adoption of IPv6, middle boxes (e.g., NATs, Firewalls) as well as completely new network types and protocols paint a picture of a future Internet that consists of extremely heterogeneous edge networks (e.g. IPv4, IPv6, industrial Ethernet, sensor networks) that are not supposed or able to communicate directly. This increasing heterogeneity imposes severe challenges for overlay networks, which are considered as a potential migration strategy towards the future Internet since they can add new functionality and services in a distributed and self-organizing manner. Unfortunately, overlays are based on end-to-end connectivity and, thus, their deployment is hindered by network heterogeneity. In this paper, we take steps towards a solution to enable overlay connections in such heterogeneous networks, building upon a model of heterogeneous networks that comprises several connectivity domains with direct connectivity, interconnected by relays. As major contribution, we present a distributed protocol that detects the boundaries of connectivity domains as well as relays using a gossiping approach. Furthermore, the protocol manages unique identifiers of connectivity domains and efficiently handles domain splitting and merging due to underlay changes. 
Simulation studies indicate that the algorithm can handle splitting and merging of connectivity domains in reasonable time and is scalable with respect to control overhead}, isbn = {978-1-4244-3437-4}, doi = {10.1109/ICCW.2009.5207975}, url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5207975}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{2009_21, title = {Traffic Engineering vs. Content Distribution: A Game Theoretic Perspective}, author = {Dominic DiPalantino and Ramesh Johari}, booktitle = {INFOCOM 2009. The 28th IEEE International Conference on Computer Communications}, organization = {IEEE Computer Society}, year = {2009}, month = {April}, address = {Rio de Janeiro}, pages = {540--548}, publisher = {IEEE Computer Society}, abstract = {In this paper we explore the interaction between content distribution and traffic engineering. Because a traffic engineer may be unaware of the structure of content distribution systems or overlay networks, this management of the network does not fully anticipate how traffic might change as a result of his actions. Content distribution systems that assign servers at the application level can respond very rapidly to changes in the routing of the network. Consequently, the traffic engineer's decisions may almost never be applied to the intended traffic. We use a game-theoretic framework in which infinitesimal users of a network select the source of content, and the traffic engineer decides how the traffic will route through the network. We formulate a game and prove the existence of equilibria. Additionally, we present a setting in which equilibria are socially optimal, essentially unique, and stable. Conditions under which efficiency loss may be bounded are presented, and the results are extended to the cases of general overlay networks and multiple autonomous systems}, www_section = {content distribution, traffic engineering}, isbn = {978-1-4244-3512-8}, doi = {10.1109/INFCOM.2009.5061960}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Traffic\%20Engineering\%20vs.\%20Content\%20Distribution.PDF}, }
@book{2009_22, title = {Tuning Vivaldi: Achieving Increased Accuracy and Stability}, author = {Benedikt Elser and Andreas F{\"o}rschler and Thomas Fuhrmann}, booktitle = {Self-Organizing Systems}, volume = {5918}, year = {2009}, pages = {174--184}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {Network Coordinates are a basic building block for most peer-to-peer applications nowadays. They optimize the peer selection process by allowing the nodes to preferably attach to peers to whom they then experience a low round trip time. Albeit there has been substantial research effort in this topic over the last years, the optimization of the various network coordinate algorithms has not been pursued systematically yet. Analyzing the well-known Vivaldi algorithm and its proposed optimizations with several sets of extensive Internet traffic traces, we found that in face of current Internet data most of the parameters that have been recommended in the original papers are a magnitude too high. Based on this insight, we recommend modified parameters that improve the algorithms' performance significantly}, isbn = {978-3-642-10864-8}, issn = {0302-9743}, doi = {10.1007/978-3-642-10865-5}, url = {http://www.springerlink.com/content/h7r3q58251x72155/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{2009_3, title = {Differentially Private Recommender Systems: Building Privacy into the Netflix Prize Contenders}, author = {McSherry, Frank and Mironov, Ilya}, booktitle = {Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, organization = {ACM}, year = {2009}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {We consider the problem of producing recommendations from collective user behavior while simultaneously providing guarantees of privacy for these users. Specifically, we consider the Netflix Prize data set, and its leading algorithms, adapted to the framework of differential privacy. Unlike prior privacy work concerned with cryptographically securing the computation of recommendations, differential privacy constrains a computation in a way that precludes any inference about the underlying records from its output. Such algorithms necessarily introduce uncertainty--i.e., noise--to computations, trading accuracy for privacy. We find that several of the leading approaches in the Netflix Prize competition can be adapted to provide differential privacy, without significantly degrading their accuracy. To adapt these algorithms, we explicitly factor them into two parts, an aggregation/learning phase that can be performed with differential privacy guarantees, and an individual recommendation phase that uses the learned correlations and an individual's data to provide personalized recommendations. The adaptations are non-trivial, and involve both careful analysis of the per-record sensitivity of the algorithms to calibrate noise, as well as new post-processing steps to mitigate the impact of this noise. 
We measure the empirical trade-off between accuracy and privacy in these adaptations, and find that we can provide non-trivial formal privacy guarantees while still outperforming the Cinematch baseline Netflix provides}, www_section = {Differential Privacy, Netflix, recommender systems}, isbn = {978-1-60558-495-9}, doi = {10.1145/1557019.1557090}, url = {http://doi.acm.org/10.1145/1557019.1557090}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecommender2009McSherry.pdf}, }
@conference{2009_4, title = {Enhancing Application-Layer Multicast Solutions by Wireless Underlay Support}, author = {H{\"u}bsch, Christian and Oliver Waldhorst}, booktitle = {Kommunikation in Verteilten Systemen (KiVS) 2009, Kassel, Germany}, year = {2009}, abstract = {Application Layer Multicast (ALM) is an attractive solution to overcome the deployment problems of IP-Multicast. We show how to cope with the challenges of incorporating wireless devices into ALM protocols. As a first approach we extend the NICE protocol, significantly increasing its performance in scenarios with many devices connected through wireless LAN}, www_section = {multicast}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.143.2935}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nice-wli.pdf}, }
@mastersthesis{2009_5, title = {Evaluation of Current P2P-SIP Proposals with Respect to the Igor/SSR API}, author = {Markus Bucher}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {Computer Science}, year = {2009}, address = {Munich, Germany}, type = {Diplomarbeit}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@mastersthesis{2009_6, title = {Monte-Carlo Search Techniques in the Modern Board Game Thurn and Taxis}, author = {Frederik Christiaan Schadd}, school = {Maastricht University}, volume = {Master Science of Artificial Intelligence}, year = {2009}, month = {December}, address = {Maastricht, Netherlands}, pages = {0--93}, type = {Master Thesis}, abstract = {Modern board games present a new and challenging field when researching search techniques in the field of Artificial Intelligence. These games differ to classic board games, such as chess, in that they can be non-deterministic, have imperfect information or more than two players. While tree-search approaches, such as alpha-beta pruning, have been quite successful in playing classic board games, by for instance defeating the then reigning world champion Gary Kasparov in Chess, these techniques are not as effective when applied to modern board games. This thesis investigates the effectiveness of Monte-Carlo Tree Search when applied to a modern board game, for which the board game Thurn and Taxis was used. This is a non-deterministic modern board game with imperfect information that can be played with more than 2 players, and is hence suitable for research. First, the state-space and game-tree complexities of this game are computed, from which the conclusion can be drawn that the two-player version of the game has a complexity similar to the game Shogi. Several techniques are investigated in order to improve the sampling process, for instance by adding domain knowledge. Given the results of the experiments, one can conclude that Monte-Carlo Tree Search gives a slight performance increase over standard Monte-Carlo search. In addition, the most effective improvements appeared to be the application of pseudo-random simulations and limiting simulation lengths, while other techniques have been shown to be less effective or even ineffective. 
Overall, when applying the best performing techniques, an AI with advanced playing strength has been created, such that further research is likely to push this performance to a strength of expert level}, www_section = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search, search techniques}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20F.Schadd.pdf}, url = {https://bibliography.gnunet.org}, }
@book{2009_7, title = {Multi Party Distributed Private Matching, Set Disjointness and Cardinality of Set Intersection with Information Theoretic Security}, author = {Sathya Narayanan, G. and Aishwarya, T. and Agrawal, Anugrah and Patra, Arpita and Choudhary, Ashish and Pandu Rangan, C.}, booktitle = {Cryptology and Network Security}, organization = {Springer Berlin Heidelberg}, volume = {5888}, year = {2009}, pages = {21--40}, editor = {Garay, Juan A. and Miyaji, Atsuko and Otsuka, Akira}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In this paper, we focus on the specific problems of Private Matching, Set Disjointness and Cardinality of Set Intersection in information theoretic settings. Specifically, we give perfectly secure protocols for the above problems in n party settings, tolerating a computationally unbounded semi-honest adversary, who can passively corrupt at most t < n/2 parties. To the best of our knowledge, these are the first such information theoretically secure protocols in a multi-party setting for all the three problems. Previous solutions for Distributed Private Matching and Cardinality of Set Intersection were cryptographically secure and the previous Set Disjointness solution, though information theoretically secure, is in a two party setting. We also propose a new model for Distributed Private matching which is relevant in a multi-party setting}, www_section = {Multiparty Computation, Privacy preserving Set operations}, isbn = {978-3-642-10432-9}, doi = {10.1007/978-3-642-10433-6_2}, url = {http://dx.doi.org/10.1007/978-3-642-10433-6_2}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiParty2009Narayanan.pdf}, }
@book{2009_8, title = {An Optimally Fair Coin Toss}, author = {Moran, Tal and Naor, Moni and Segev, Gil}, booktitle = {Theory of Cryptography}, organization = {Springer Berlin Heidelberg}, volume = {5444}, year = {2009}, pages = {1--18}, editor = {Reingold, Omer}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {We address one of the foundational problems in cryptography: the bias of coin-flipping protocols. Coin-flipping protocols allow mutually distrustful parties to generate a common unbiased random bit, guaranteeing that even if one of the parties is malicious, it cannot significantly bias the output of the honest party. A classical result by Cleve [STOC '86] showed that for any two-party r-round coin-flipping protocol there exists an efficient adversary that can bias the output of the honest party by $\Omega(1/r)$. However, the best previously known protocol only guarantees $O(1/\sqrt{r})$ bias, and the question of whether Cleve's bound is tight has remained open for more than twenty years. In this paper we establish the optimal trade-off between the round complexity and the bias of two-party coin-flipping protocols. Under standard assumptions (the existence of oblivious transfer), we show that Cleve's lower bound is tight: we construct an r-round protocol with bias O(1/r)}, isbn = {978-3-642-00456-8}, doi = {10.1007/978-3-642-00457-5_1}, url = {http://dx.doi.org/10.1007/978-3-642-00457-5_1}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OptimallyFairCoinToss2009Moran.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{2009_9, title = {Optimization of distributed services with UNISONO}, author = {unknown}, booktitle = {GI/ITG KuVS Fachgespr{\"a}ch NGN Service Delivery Platforms \& Service Overlay Networks}, year = {2009}, abstract = {Distributed services are a special case of P2P networks where nodes have several distinctive tasks. Based on previous work, we show how UNISONO provides a way to optimize these services to increase performance, efficiency and user experience. UNISONO is a generic framework for host-based distributed network measurements. In this talk, we present UNISONO as an Enabler for self-organizing Service Delivery Plattforms. We give a short overview of the UNISONO concept and show how distributed services benefit from its usage}, www_section = {distributed systems, P2P}, url = {http://www.net.in.tum.de/de/mitarbeiter/holz/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/unisono_kuvs-ngn.pdf}, }
@conference{2010_0, title = {Application of Random Walks to Decentralized Recommender Systems}, author = {Anne-Marie Kermarrec and Vincent Leroy and Afshin Moin and Christopher Thraves}, booktitle = {14th International Conference on Principles of Distributed Systems}, year = {2010}, month = {September}, www_section = {random walks, recommender system}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/opodis10_HAL.pdf}, url = {https://bibliography.gnunet.org}, }
@article{2010_1, title = {The Ariba Framework for Application Development using Service Overlays}, author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}, journal = {Praxis der Informationsverarbeitung und Kommunikation}, volume = {33}, year = {2010}, pages = {7--11}, abstract = {Developing new network services in the Internet is complex and costly. This high entrance barrier has prevented new innovation in the network itself, and stuck the Internet as being mainly browser-based client/server systems. End-system based decentralized services are cheaper, but have a complexity several orders of magnitude higher than centralized systems in terms of structure and protocols. To foster development of such decentralized network services, we present the ariba framework. We show how ariba can facilitate development of end-system based decentralized services through self-organizing service overlays--flexibly deployed purely on end-systems without the need for costly infrastructure}, www_section = {overlay networks}, issn = {1865-8342}, doi = {10.1515/piko.2010.003}, url = {http://www.reference-global.com/doi/abs/10.1515/piko.2010.003}, }
@conference{2010_10, title = {Private Record Matching Using Differential Privacy}, author = {Inan, Ali and Kantarcioglu, Murat and Ghinita, Gabriel and Bertino, Elisa}, booktitle = {Proceedings of the 13th International Conference on Extending Database Technology}, organization = {ACM}, year = {2010}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {Private matching between datasets owned by distinct parties is a challenging problem with several applications. Private matching allows two parties to identify the records that are close to each other according to some distance functions, such that no additional information other than the join result is disclosed to any party. Private matching can be solved securely and accurately using secure multi-party computation (SMC) techniques, but such an approach is prohibitively expensive in practice. Previous work proposed the release of sanitized versions of the sensitive datasets which allows blocking, i.e., filtering out sub-sets of records that cannot be part of the join result. This way, SMC is applied only to a small fraction of record pairs, reducing the matching cost to acceptable levels. The blocking step is essential for the privacy, accuracy and efficiency of matching. However, the state-of-the-art focuses on sanitization based on k-anonymity, which does not provide sufficient privacy. We propose an alternative design centered on differential privacy, a novel paradigm that provides strong privacy guarantees. The realization of the new model presents difficult challenges, such as the evaluation of distance-based matching conditions with the help of only a statistical queries interface. Specialized versions of data indexing structures (e.g., kd-trees) also need to be devised, in order to comply with differential privacy. 
Experiments conducted on the real-world Census-income dataset show that, although our methods provide strong privacy, their effectiveness in reducing matching cost is not far from that of k-anonymity based counterparts}, www_section = {Differential Privacy, privacy, record matching, security}, isbn = {978-1-60558-945-9}, doi = {10.1145/1739041.1739059}, url = {http://doi.acm.org/10.1145/1739041.1739059}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecordMatching2010Inan.pdf}, }
@conference{2010_11, title = {On Runtime Adaptation of Application-Layer Multicast Protocol Parameters}, author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}, booktitle = {Proceedings of Networked Services and Applications -- Engineering, Control and Management (EUNICE)}, organization = {Springer}, year = {2010}, address = {Trondheim, Norway}, publisher = {Springer}, note = {to appear}, series = {Lecture Notes in Computer Science}, url = {http://www.tm.uni-karlsruhe.de/itm/WebMan/view.php?view=publikationen_detail\&id=389\&lang=en}, www_section = {Unsorted}, }
@conference{2010_12, title = {Scalable Application-Layer Multicast Simulations with OverSim}, author = {Stephan Krause and H{\"u}bsch, Christian}, booktitle = {7th Annual IEEE Consumer Communications \& Networking Conference}, year = {2010}, abstract = {Application-Layer Multicast has become a promising class of protocols since IP Multicast has not found wide area deployment in the Internet. Developing such protocols requires in-depth analysis of their properties even with large numbers of participants---a characteristic which is at best hard to achieve in real network experiments. Several well-known simulation frameworks have been developed and used in recent years, but none has proved to be fitting the requirements for analyzing large-scale application-layer networks. In this paper we propose the OverSim framework as a promising simulation environment for scalable Application-Layer Multicast research. We show that OverSim is able to manage even overlays with several thousand participants in short time while consuming comparably little memory. We compare the framework's runtime properties with the two exemplary Application-Layer Multicast protocols Scribe and NICE. The results show that both simulation time and memory consumption grow linearly with the number of nodes in highly feasible dimensions}, www_section = {multicast, NICE, OverSim, Scribe}, url = {https://bibliography.gnunet.org}, }
@conference{2010_13, title = {User-perceived Performance of the NICE Application Layer Multicast Protocol in Large and Highly Dynamic Groups}, author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst}, booktitle = {Proceedings of 15th International GI/ITG Conference on "Measurement, Modelling and Evaluation of Computing Systems"}, organization = {Springer Berlin, Heidelberg}, year = {2010}, month = {January}, address = {Essen, Germany}, pages = {62--77}, publisher = {Springer Berlin, Heidelberg}, note = {Best Paper Award}, abstract = {The presentation of a landmark paper by Chu et al. at SIGMETRICS 2000 introduced application layer multicast (ALM) as completely new area of network research. Many researchers have since proposed ALM protocols, and have shown that these protocols only put a small burden on the network in terms of link-stress and -stretch. However, since the network is typically not a bottleneck, user acceptance remains the limiting factor for the deployment of ALM. In this paper we present an in-depth study of the user-perceived performance of the NICE ALM protocol. We use the OverSim simulation framework to evaluate delay experienced by a user and bandwidth consumption on the user's access link in large multicast groups and under aggressive churn models. Our major results are (1) latencies grow moderate with increasing number of nodes as clusters get optimized, (2) join delays get optimized over time, and (3) despite being a tree-dissemination protocol NICE handles churn surprisingly well when adjusting heartbeat intervals accordingly. We conclude that NICE comes up to the user's expectations even for large groups and under high churn. 
This work was partially funded as part of the Spontaneous Virtual Networks (SpoVNet) project by the Landesstiftung Baden-W{\"u}rttemberg within the BW-FIT program and as part of the Young Investigator Group Controlling Heterogeneous and Dynamic Mobile Grid and Peer-to-Peer Systems (CoMoGriP) by the Concept for the Future of Karlsruhe Institute of Technology (KIT) within the framework of the German Excellence Initiative}, isbn = {978-3-642-12103-6}, doi = {10.1007/978-3-642-12104-3}, url = {http://www.springerlink.com/content/t6k421560103540n/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/User-Perceived\%20Performance\%20of\%20the\%20NICE\%20Application\%20Layer\%20Multicast\%20Protocol\%20in\%20Large\%20and\%20Highly\%20Dynamic\%20Groups_1.pdf}, www_section = {Unsorted}, }
@conference{2010_14, title = {Using Legacy Applications in Future Heterogeneous Networks with ariba}, author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Sebastian Mies and Roland Bless and Oliver Waldhorst and Martina Zitterbart}, booktitle = {Proceedings of IEEE INFOCOM}, year = {2010}, address = {San Diego, CA, USA}, note = {Demo}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@conference{2010_2, title = {Autonomous NAT Traversal}, author = {Andreas M{\"u}ller and Nathan S Evans and Christian Grothoff and Samy Kamkar}, booktitle = {10th IEEE International Conference on Peer-to-Peer Computing (IEEE P2P'10)}, organization = {IEEE}, year = {2010}, address = {Delft, The Netherlands}, publisher = {IEEE}, abstract = {Traditional NAT traversal methods require the help of a third party for signalling. This paper investigates a new autonomous method for establishing connections to peers behind NAT. The proposed method for Autonomous NAT traversal uses fake ICMP messages to initially contact the NATed peer. This paper presents how the method is supposed to work in theory, discusses some possible variations, introduces various concrete implementations of the proposed approach and evaluates empirical results of a measurement study designed to evaluate the efficacy of the idea in practice}, www_section = {GNUnet, ICMP, NAT, P2P}, url = {http://grothoff.org/christian/pwnat.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pwnat.pdf}, }
@mastersthesis{2010_3, title = {Developing Peer-to-Peer Web Applications}, author = {Toni Ruottu}, school = {University of Helsinki}, volume = {M.S}, year = {2010}, month = {September}, address = {Helsinki}, pages = {0--66}, type = {Master's Thesis}, abstract = {As the virtual world grows more complex, finding a standard way for storing data becomes increasingly important. Ideally, each data item would be brought into the computer system only once. References for data items need to be cryptographically verifiable, so the data can maintain its identity while being passed around. This way there will be only one copy of the users family photo album, while the user can use multiple tools to show or manipulate the album. Copies of users data could be stored on some of his family members computer, some of his computers, but also at some online services which he uses. When all actors operate over one replicated copy of the data, the system automatically avoids a single point of failure. Thus the data will not disappear with one computer breaking, or one service provider going out of business. One shared copy also makes it possible to delete a piece of data from all systems at once, on users request. In our research we tried to find a model that would make data manageable to users, and make it possible to have the same data stored at various locations. We studied three systems, Persona, Freenet, and GNUnet, that suggest different models for protecting user data. The main application areas of the systems studied include securing online social networks, providing anonymous web, and preventing censorship in file-sharing. Each of the systems studied store user data on machines belonging to third parties. The systems differ in measures they take to protect their users from data loss, forged information, censorship, and being monitored. All of the systems use cryptography to secure names used for the content, and to protect the data from outsiders. 
Based on the gained knowledge, we built a prototype platform called Peerscape, which stores user data in a synchronized, protected database. Data items themselves are protected with cryptography against forgery, but not encrypted as the focus has been disseminating the data directly among family and friends instead of letting third parties store the information. We turned the synchronizing database into peer-to-peer web by revealing its contents through an integrated http server. The REST-like http API supports development of applications in javascript. To evaluate the platform's suitability for application development we wrote some simple applications, including a public chat room, bittorrent site, and a flower growing game. During our early tests we came to the conclusion that using the platform for simple applications works well. As web standards develop further, writing applications for the platform should become easier. Any system this complex will have its problems, and we are not expecting our platform to replace the existing web, but are fairly impressed with the results and consider our work important from the perspective of managing user data}, www_section = {content centric, ECRS, Freenet, GNUnet, P2P, Peerscape, Persona}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/twr-dp2pwa.pdf}, url = {https://bibliography.gnunet.org}, }
@article{2010_4, title = {On the Difficulties of Disclosure Prevention in Statistical Databases or The Case for Differential Privacy}, author = {Cynthia Dwork and Moni Naor}, journal = {Journal of Privacy and Confidentiality}, volume = {2}, year = {2010}, pages = {93--107}, abstract = {In 1977 Tore Dalenius articulated a desideratum for statistical databases: nothing about an individual should be learnable from the database that cannot be learned without access to the database. We give a general impossibility result showing that a natural formalization of Dalenius' goal cannot be achieved if the database is useful. The key obstacle is the side information that may be available to an adversary. Our results hold under very general conditions regarding the database, the notion of privacy violation, and the notion of utility.Contrary to intuition, a variant of the result threatens the privacy even of someone not in the database. This state of affairs motivated the notion of differential privacy [15, 16], a strong ad omnia privacy which, intuitively, captures the increased risk to one's privacy incurred by participating in a database}, url = {http://research.microsoft.com/apps/pubs/default.aspx?id=135704}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DisclousrePrevention2010Dwork.pdf}, www_section = {Unsorted}, }
@book{2010_5, title = {Drac: An Architecture for Anonymous Low-Volume Communications}, author = {Danezis, George and Claudia Diaz and Troncoso, Carmela and Laurie, Ben}, booktitle = {Privacy Enhancing Technologies}, organization = {Springer Berlin Heidelberg}, volume = {6205}, year = {2010}, pages = {202--219}, editor = {Atallah, Mikhail J. and Hopper, Nicholas J.}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, www_section = {anonymous communication, anonymous IM, anonymous voice, Drac, F2F}, isbn = {978-3-642-14526-1}, doi = {10.1007/978-3-642-14527-8_12}, url = {http://dx.doi.org/10.1007/978-3-642-14527-8_12}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/drac-pet2010.pdf}, }
@conference{2010_6, title = {Efficient DHT attack mitigation through peers' ID distribution}, author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}, booktitle = {HOTP2P'10--International Workshop on Hot Topics in Peer-to-Peer Systems}, year = {2010}, month = {April}, address = {Atlanta, Georgia, USA}, abstract = {We present a new solution to protect the widely deployed KAD DHT against localized attacks which can take control over DHT entries. We show through measurements that the IDs distribution of the best peers found after a lookup process follows a geometric distribution. We then use this result to detect DHT attacks by comparing real peers' ID distributions to the theoretical one thanks to the Kullback-Leibler divergence. When an attack is detected, we propose countermeasures that progressively remove suspicious peers from the list of possible contacts to provide a safe DHT access. Evaluations show that our method detects the most efficient attacks with a very small false-negative rate, while countermeasures successfully filter almost all malicious peers involved in an attack. Moreover, our solution completely fits the current design of the KAD network and introduces no network overhead}, www_section = {attack detection, attack mitigation, distributed hash table, IDs distribution, KAD, Sybil attack}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf}, url = {https://bibliography.gnunet.org}, }
@inproceedings{2010_7,
  title = {How Accurately Can One's Interests Be Inferred from Friends?},
  author = {Wen, Zhen and Lin, Ching-Yung},
  booktitle = {Proceedings of the 19th International Conference on World Wide Web},
  organization = {ACM},
  year = {2010},
  address = {New York, NY, USA},
  publisher = {ACM},
  abstract = {Search and recommendation systems must effectively model user interests in order to provide personalized results. The proliferation of social software makes social network an increasingly important source for user interest modeling, because of the social influence and correlation among friends. However, there are large variations in people's contribution of social content. Therefore, it is impractical to accurately model interests for all users. As a result, applications need to decide whether to utilize a user interest model based on its accuracy. To address this challenge, we present a study on the accuracy of user interests inferred from three types of social content: social bookmarking, file sharing, and electronic communication, in an organizational social network within a large-scale enterprise. First, we demonstrate that combining different types of social content to infer user interests outperforms methods that use only one type of social content. Second, we present a technique to predict the inference accuracy based on easily observed network characteristics, including user activeness, network in-degree, out-degree, and betweenness centrality},
  www_section = {accuracy, social networks, user modeling},
  isbn = {978-1-60558-799-8},
  doi = {10.1145/1772690.1772875},
  url = {http://doi.acm.org/10.1145/1772690.1772875},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/InterestsInference2010Wen.pdf},
}
@phdthesis{2010_8,
  title = {Incentive-driven QoS in peer-to-peer overlays},
  author = {Landa Gamiochipi, Raul Leonardo},
  school = {University College London},
  year = {2010},
  month = may,
  address = {London},
  pages = {0--209},
  abstract = {A well known problem in peer-to-peer overlays is that no single entity has control over the software, hardware and configuration of peers. Thus, each peer can selfishly adapt its behaviour to maximise its benefit from the overlay. This thesis is concerned with the modelling and design of incentive mechanisms for QoS-overlays: resource allocation protocols that provide strategic peers with participation incentives, while at the same time optimising the performance of the peer-to-peer distribution overlay. The contributions of this thesis are as follows. First, we present PledgeRoute, a novel contribution accounting system that can be used, along with a set of reciprocity policies, as an incentive mechanism to encourage peers to contribute resources even when users are not actively consuming overlay services. This mechanism uses a decentralised credit network, is resilient to sybil attacks, and allows peers to achieve time and space deferred contribution reciprocity. Then, we present a novel, QoS-aware resource allocation model based on Vickrey auctions that uses PledgeRoute as a substrate. It acts as an incentive mechanism by providing efficient overlay construction, while at the same time allocating increasing service quality to those peers that contribute more to the network. The model is then applied to lag-sensitive chunk swarming, and some of its properties are explored for different peer delay distributions. When considering QoS overlays deployed over the best-effort Internet, the quality received by a client cannot be adjudicated completely to either its serving peer or the intervening network between them. By drawing parallels between this situation and well-known hidden action situations in microeconomics, we propose a novel scheme to ensure adherence to advertised QoS levels. We then apply it to delay-sensitive chunk distribution overlays and present the optimal contract payments required, along with a method for QoS contract enforcement through reciprocative strategies. We also present a probabilistic model for application-layer delay as a function of the prevailing network conditions. Finally, we address the incentives of managed overlays, and the prediction of their behaviour. We propose two novel models of multihoming managed overlay incentives in which overlays can freely allocate their traffic flows between different ISPs. One is obtained by optimising an overlay utility function with desired properties, while the other is designed for data-driven least-squares fitting of the cross elasticity of demand. This last model is then used to solve for ISP profit maximisation},
  www_section = {BitTorrent, Freeloading, game theory, incentives, PeerLive, prices, QoS},
  url = {http://eprints.ucl.ac.uk/19490/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/19490.pdf},
}
@article{2010_9,
  title = {Malugo: A peer-to-peer storage system},
  author = {Chan, Yu-Wei and Ho, Tsung-Hsuan and Shih, Po-Chi and Chung, Yeh-Ching},
  journal = {International Journal of Ad Hoc and Ubiquitous Computing},
  volume = {5},
  number = {4},
  year = {2010},
  abstract = {We consider the problem of routing locality in peer-to-peer storage systems where peers store and exchange data among themselves. With the global information, peers will take the data locality into consideration when they implement their replication mechanisms to keep a number of file replicas all over the systems. In this paper, we mainly propose a peer-to-peer storage system--Malugo. Algorithms for the implementation of the peers' locating and file operation processes are also presented. Simulation results show that the proposed system successfully constructs an efficient and stable peer-to-peer storage environment with considerations of data and routing locality among peers},
  www_section = {distributed storage, Malugo, peer-to-peer storage},
  doi = {10.1504/IJAHUC.2010.032995},
  url = {http://www.ingentaconnect.com/content/ind/ijahuc/2010/00000005/00000004/art00002;jsessionid=kcpun0o76hoe.alexandra},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Malugo.pdf},
}
@article{2011_0,
  title = {Collaborative Personalized Top-k Processing},
  author = {Bai, Xiao and Guerraoui, Rachid and Kermarrec, Anne-Marie and Leroy, Vincent},
  journal = {ACM Transactions on Database Systems},
  volume = {36},
  year = {2011},
  pages = {26:1--26:38},
  abstract = {This article presents P4Q, a fully decentralized gossip-based protocol to personalize query processing in social tagging systems. P4Q dynamically associates each user with social acquaintances sharing similar tagging behaviors. Queries are gossiped among such acquaintances, computed on-the-fly in a collaborative, yet partitioned manner, and results are iteratively refined and returned to the querier. Analytical and experimental evaluations convey the scalability of P4Q for top-k query processing, as well as its inherent ability to cope with users updating profiles and departing},
  www_section = {gossip, Peer-to-peer networks, Personalization, top-k processing},
  issn = {0362-5915},
  doi = {10.1145/2043652.2043659},
  url = {http://doi.acm.org/10.1145/2043652.2043659},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TopK-Processing2011Bai.pdf},
}
@techreport{2011_1,
  title = {A comprehensive study of Convergent and Commutative Replicated Data Types},
  author = {Marc Shapiro and Nuno Pregui{\c{c}}a and Carlos Baquero and Marek Zawirski},
  institution = {INRIA Rocquencourt},
  type = {Research Report},
  number = {7506},
  year = {2011},
  month = jan,
  address = {Le Chesnay Cedex},
  abstract = {Eventual consistency aims to ensure that replicas of some mutable shared object converge without foreground synchronisation. Previous approaches to eventual consistency are ad-hoc and error-prone. We study a principled approach: to base the design of shared data types on some simple formal conditions that are sufficient to guarantee eventual consistency. We call these types Convergent or Commutative Replicated Data Types (CRDTs). This paper formalises asynchronous object replication, either state based or operation based, and provides a sufficient condition appropriate for each case. It describes several useful CRDTs, including container data types supporting both add and remove operations with clean semantics, and more complex types such as graphs, monotonic DAGs, and sequences. It discusses some properties needed to implement non-trivial CRDTs},
  www_section = {commutative operations, data replication, optimistic replication},
  issn = {0249-6399},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crdt.pdf},
  url = {https://bibliography.gnunet.org},
}
@inproceedings{2011_11,
  title = {On the Relation Between Differential Privacy and Quantitative Information Flow},
  author = {Alvim, M{\'a}rio S. and Andr{\'e}s, Miguel E.},
  booktitle = {Proceedings of the 38th International Conference on Automata, Languages and Programming--Volume Part II},
  organization = {Springer-Verlag},
  year = {2011},
  address = {Berlin, Heidelberg},
  publisher = {Springer-Verlag},
  abstract = {Differential privacy is a notion that has emerged in the community of statistical databases, as a response to the problem of protecting the privacy of the database's participants when performing statistical queries. The idea is that a randomized query satisfies differential privacy if the likelihood of obtaining a certain answer for a database x is not too different from the likelihood of obtaining the same answer on adjacent databases, i.e. databases which differ from x for only one individual. Information flow is an area of Security concerned with the problem of controlling the leakage of confidential information in programs and protocols. Nowadays, one of the most established approaches to quantify and to reason about leakage is based on the R{\'e}nyi min entropy version of information theory. In this paper, we analyze critically the notion of differential privacy in light of the conceptual framework provided by the R{\'e}nyi min information theory. We show that there is a close relation between differential privacy and leakage, due to the graph symmetries induced by the adjacency relation. Furthermore, we consider the utility of the randomized answer, which measures its expected degree of accuracy. We focus on certain kinds of utility functions called {\textquotedblleft}binary{\textquotedblright}, which have a close correspondence with the R{\'e}nyi min mutual information. Again, it turns out that there can be a tight correspondence between differential privacy and utility, depending on the symmetries induced by the adjacency relation and by the query. Depending on these symmetries we can also build an optimal-utility randomization mechanism while preserving the required level of differential privacy. Our main contribution is a study of the kind of structures that can be induced by the adjacency relation and the query, and how to use them to derive bounds on the leakage and achieve the optimal utility},
  isbn = {978-3-642-22011-1},
  url = {http://dl.acm.org/citation.cfm?id=2027223.2027228},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2011Alvim.pdf},
  www_section = {Unsorted},
}
@inproceedings{2011_12,
  title = {Scalability \& Paranoia in a Decentralized Social Network},
  author = {Carlo v. Loesch and Gabor X Toth and Mathias Baumann},
  booktitle = {Federated Social Web},
  year = {2011},
  month = jun,
  address = {Berlin, Germany},
  abstract = {There's a lot of buzz out there about "replacing" Facebook with a privacy-enhanced, decentralized, ideally open source something. In this talk we'll focus on how much privacy we should plan for (specifically about how we cannot entrust our privacy to modern virtual machine technology) and the often underestimated problem of getting such a monster network to function properly. These issues can be considered together or separately: Even if you're not as concerned about privacy as we are, the scalability problem still persists},
  www_section = {GNUnet, privacy, social networks},
  url = {https://secushare.org/2011-FSW-Scalability-Paranoia},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2011-FSW-Scalability-Paranoia.pdf},
}
@article{2011_13,
  title = {Secure collaborative supply chain planning and inverse optimization--The JELS model},
  author = {Richard Pibernik and Yingying Zhang and Florian Kerschbaum and Axel Schr{\"o}pfer},
  journal = {European Journal of Operational Research},
  volume = {208},
  year = {2011},
  month = jan,
  pages = {75--85},
  abstract = {It is a well-acknowledged fact that collaboration between different members of a supply chain yields a significant potential to increase overall supply chain performance. Sharing private information has been identified as prerequisite for collaboration and, at the same time, as one of its major obstacles. One potential avenue for overcoming this obstacle is Secure Multi-Party Computation (SMC). SMC is a cryptographic technique that enables the computation of any (well-defined) mathematical function by a number of parties without any party having to disclose its input to another party. In this paper, we show how SMC can be successfully employed to enable joint decision-making and benefit sharing in a simple supply chain setting. We develop secure protocols for implementing the well-known {\textquotedblleft}Joint Economic Lot Size (JELS) Model{\textquotedblright} with benefit sharing in such a way that none of the parties involved has to disclose any private (cost and capacity) data. Thereupon, we show that although computation of the model's outputs can be performed securely, the approach still faces practical limitations. These limitations are caused by the potential of {\textquotedblleft}inverse optimization{\textquotedblright}, i.e., a party can infer another party's private data from the output of a collaborative planning scheme even if the computation is performed in a secure fashion. We provide a detailed analysis of {\textquotedblleft}inverse optimization{\textquotedblright} potentials and introduce the notion of {\textquotedblleft}stochastic security{\textquotedblright}, a novel approach to assess the additional information a party may learn from joint computation and benefit sharing. Based on our definition of {\textquotedblleft}stochastic security{\textquotedblright} we propose a stochastic benefit sharing rule, develop a secure protocol for this benefit sharing rule, and assess under which conditions stochastic benefit sharing can guarantee secure collaboration},
  www_section = {collaboration, information sharing, secure multi-party computation, SMC, supplychain management},
  doi = {10.1016/j.ejor.2010.08.018},
  url = {http://www.sciencedirect.com/science/article/pii/S0377221710005552},
}
@incollection{2011_14,
  title = {Social Market: Combining Explicit and Implicit Social Networks},
  author = {Frey, Davide and J{\'e}gou, Arnaud and Kermarrec, Anne-Marie},
  booktitle = {Stabilization, Safety, and Security of Distributed Systems},
  organization = {Springer Berlin Heidelberg},
  volume = {6976},
  year = {2011},
  pages = {193--207},
  editor = {D{\'e}fago, Xavier and Petit, Franck and Villain, Vincent},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {The pervasiveness of the Internet has led research and applications to focus more and more on their users. Online social networks such as Facebook provide users with the ability to maintain an unprecedented number of social connections. Recommendation systems exploit the opinions of other users to suggest movies or products based on our similarity with them. This shift from machines to users motivates the emergence of novel applications and research challenges. In this paper, we embrace the social aspects of the Web 2.0 by considering a novel problem. We build a distributed social market that combines interest-based social networks with explicit networks like Facebook. Our Social Market (SM) allows users to identify and build connections to other users that can provide interesting goods, or information. At the same time, it backs up these connections with trust, by associating them with paths of trusted users that connect new acquaintances through the explicit network. This convergence of implicit and explicit networks yields TAPS, a novel gossip protocol that can be applied in applications devoted to commercial transactions, or to add robustness to standard gossip applications like dissemination or recommendation systems},
  isbn = {978-3-642-24549-7},
  doi = {10.1007/978-3-642-24550-3_16},
  url = {http://dx.doi.org/10.1007/978-3-642-24550-3_16},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SocialMarket2011Frey.pdf},
  www_section = {Unsorted},
}
@inproceedings{2011_15,
  title = {SWIRL: A Scalable Watermark to Detect Correlated Network Flows},
  author = {Amir Houmansadr and Borisov, Nikita},
  booktitle = {NDSS'11--Proceedings of the Network and Distributed Security Symposium},
  year = {2011},
  month = feb,
  address = {San Diego, CA, USA},
  abstract = {Flow watermarks are active traffic analysis techniques that help establish a causal connection between two network flows by content-independent manipulations, e.g., altering packet timings. Watermarks provide a much more scalable approach for flow correlation than passive traffic analysis. Previous designs of scalable watermarks, however, were subject to multi-flow attacks. They also introduced delays too large to be used in most environments. We design SWIRL, a Scalable Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first watermark that is practical to use for large-scale traffic analysis. SWIRL uses a flow-dependent approach to resist multi-flow attacks, marking each flow with a different pattern. SWIRL is robust to packet losses and network jitter, yet it introduces only small delays that are invisible to both benign users and determined adversaries. We analyze the performance of SWIRL both analytically and on the PlanetLab testbed, demonstrating very low error rates. We consider applications of SWIRL to stepping stone detection and linking anonymous communication. We also propose a novel application of watermarks to defend against congestion attacks on Tor},
  www_section = {anonymity, SWIRL, traffic analysis, watermarking},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NDSS11-2.pdf},
  url = {https://bibliography.gnunet.org},
}
@inproceedings{2011_16,
  title = {``You Might Also Like:'' Privacy Risks of Collaborative Filtering},
  author = {Calandrino, J.A. and Kilzer, A. and Narayanan, A. and Felten, E.W. and Shmatikov, V.},
  booktitle = {Security and Privacy (SP), 2011 IEEE Symposium on},
  year = {2011},
  month = may,
  abstract = {Many commercial websites use recommender systems to help customers locate products and content. Modern recommenders are based on collaborative filtering: they use patterns learned from users' behavior to make recommendations, usually in the form of related-items lists. The scale and complexity of these systems, along with the fact that their outputs reveal only relationships between items (as opposed to information about users), may suggest that they pose no meaningful privacy risk. In this paper, we develop algorithms which take a moderate amount of auxiliary information about a customer and infer this customer's transactions from temporal changes in the public outputs of a recommender system. Our inference attacks are passive and can be carried out by any Internet user. We evaluate their feasibility using public data from popular websites Hunch, Last.fm, LibraryThing, and Amazon},
  www_section = {accuracy, Amazon, collaboration, collaborative filtering, commercial Web sites, consumer behaviour, Covariance matrix, customer transactions, data privacy, groupware, History, Hunch, Inference algorithms, inference attacks, inference mechanisms, information filtering, Internet, Internet user, Last.fm, Library Thing, privacy, privacy risks, recommender systems, Web sites},
  doi = {10.1109/SP.2011.40},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Youmightlike2011Calandrino.pdf},
  url = {https://bibliography.gnunet.org},
}
@inproceedings{2011_2,
  title = {Considering Complex Search Techniques in DHTs under Churn},
  author = {Jamie Furness and Mario Kolberg},
  booktitle = {CCNC 2011--IEEE Consumer Communications and Networking Conference},
  organization = {IEEE Computer Society},
  year = {2011},
  month = jan,
  address = {Las Vegas, NV, USA},
  publisher = {IEEE Computer Society},
  abstract = {Traditionally complex queries have been performed over unstructured P2P networks by means of flooding, which is inherently inefficient due to the large number of redundant messages generated. While Distributed Hash Tables (DHTs) can provide very efficient look-up operations, they traditionally do not provide any methods for complex queries. By exploiting the structure inherent in DHTs we can perform complex querying over structured P2P networks by means of efficiently broadcasting the search query. This allows every node in the network to process the query locally, and hence is as powerful and flexible as flooding in unstructured networks, but without the inefficiency of redundant messages. While there have been various approaches proposed for broadcasting search queries over DHTs, the focus has not been on validation under churn. Comparing blind search methods for DHTs though simulation we see that churn, in particular nodes leaving the network, has a large impact on query success rate. In this paper we present novel results comparing blind search over Chord and Pastry while under varying levels of churn. We further consider how different data replication strategies can be used to enhance the query success rate},
  www_section = {churn, complex querie, distributed hash table, search techniques},
  url = {https://bibliography.gnunet.org},
  isbn = {978-1-4244-8789-9},
  doi = {10.1109/CCNC.2011.5766542},
}
@article{2011_3,
  title = {Distributed Private Data Analysis: On Simultaneously Solving How and What},
  author = {Amos Beimel and Kobbi Nissim and Eran Omri},
  journal = {CoRR},
  volume = {abs/1103.2626},
  year = {2011},
  eprint = {1103.2626},
  archiveprefix = {arXiv},
  abstract = {We examine the combination of two directions in the field of privacy concerning computations over distributed private inputs--secure function evaluation (SFE) and differential privacy. While in both the goal is to privately evaluate some function of the individual inputs, the privacy requirements are significantly different. The general feasibility results for SFE suggest a natural paradigm for implementing differentially private analyses distributively: First choose what to compute, i.e., a differentially private analysis; Then decide how to compute it, i.e., construct an SFE protocol for this analysis. We initiate an examination whether there are advantages to a paradigm where both decisions are made simultaneously. In particular, we investigate under which accuracy requirements it is beneficial to adapt this paradigm for computing a collection of functions including binary sum, gap threshold, and approximate median queries. Our results imply that when computing the binary sum of n distributed inputs then: * When we require that the error is o(n{\surd}) and the number of rounds is constant, there is no benefit in the new paradigm. * When we allow an error of O(n{\surd}), the new paradigm yields more efficient protocols when we consider protocols that compute symmetric functions. Our results also yield new separations between the local and global models of computations for private data analysis},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedPrivateData2008Beimel.pdf},
  www_section = {Unsorted},
  url = {https://bibliography.gnunet.org},
}
@article{2011_4,
  title = {High-speed high-security signatures},
  author = {Daniel J. Bernstein and Niels Duif and Tanja Lange and Peter Schwabe and Bo-Yin Yang},
  journal = {Journal of Cryptographic Engineering},
  volume = {2},
  year = {2011},
  month = sep,
  pages = {77--89},
  www_section = {ECC, Ed25519, EdDSA, GNUnet},
  url = {http://ed25519.cr.yp.to/papers.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ed25519-20110926.pdf},
}
@incollection{2011_5,
  title = {How Much Is Enough? Choosing {\epsilon} for Differential Privacy},
  author = {Lee, Jaewoo and Clifton, Chris},
  booktitle = {Information Security},
  organization = {Springer Berlin Heidelberg},
  volume = {7001},
  year = {2011},
  pages = {325--340},
  editor = {Lai, Xuejia and Zhou, Jianying and Li, Hui},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {Differential privacy is a recent notion, and while it is nice conceptually it has been difficult to apply in practice. The parameters of differential privacy have an intuitive theoretical interpretation, but the implications and impacts on the risk of disclosure in practice have not yet been studied, and choosing appropriate values for them is non-trivial. Although the privacy parameter {\epsilon} in differential privacy is used to quantify the privacy risk posed by releasing statistics computed on sensitive data, {\epsilon} is not an absolute measure of privacy but rather a relative measure. In effect, even for the same value of {\epsilon} , the privacy guarantees enforced by differential privacy are different based on the domain of attribute in question and the query supported. We consider the probability of identifying any particular individual as being in the database, and demonstrate the challenge of setting the proper value of {\epsilon} given the goal of protecting individuals in the database with some fixed probability},
  www_section = {Differential Privacy, Privacy Parameter, epsilon},
  isbn = {978-3-642-24860-3},
  doi = {10.1007/978-3-642-24861-0_22},
  url = {http://dx.doi.org/10.1007/978-3-642-24861-0_22},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Choosing-\%CE\%B5-2011Lee.pdf},
}
@phdthesis{2011_6,
  title = {Methods for Secure Decentralized Routing in Open Networks},
  author = {Nathan S Evans},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  type = {Dr. rer. nat. dissertation},
  year = {2011},
  month = aug,
  address = {Garching bei M{\"u}nchen},
  pages = {0--234},
  abstract = {The contribution of this thesis is the study and improvement of secure, decentralized, robust routing algorithms for open networks including ad-hoc networks and peer-to-peer (P2P) overlay networks. The main goals for our secure routing algorithm are openness, efficiency, scalability and resilience to various types of attacks. Common P2P routing algorithms trade-off decentralization for security; for instance by choosing whether or not to require a centralized authority to allow peers to join the network. Other algorithms trade scalability for security, for example employing random search or flooding to prevent certain types of attacks. Our design attempts to meet our security goals in an open system, while limiting the performance penalties incurred. The first step we took towards designing our routing algorithm was an analysis of the routing algorithm in Freenet. This algorithm is relevant because it achieves efficient (order O(log n)) routing in realistic network topologies in a fully decentralized open network. However, we demonstrate why their algorithm is not secure, as malicious participants are able to severely disrupt the operation of the network. The main difficulty with the Freenet routing algorithm is that for performance it relies on information received from untrusted peers. We also detail a range of proposed solutions, none of which we found to fully fix the problem. A related problem for efficient routing in sparsely connected networks is the difficulty in sufficiently populating routing tables. One way to improve connectivity in P2P overlay networks is by utilizing modern NAT traversal techniques. We employ a number of standard NAT traversal techniques in our approach, and also developed and experimented with a novel method for NAT traversal based on ICMP and UDP hole punching. Unlike other NAT traversal techniques ours does not require a trusted third party. Another technique we use in our implementation to help address the connectivity problem in sparse networks is the use of distance vector routing in a small local neighborhood. The distance vector variant used in our system employs onion routing to secure the resulting indirect connections. Materially to this design, we discovered a serious vulnerability in the Tor protocol which allowed us to use a DoS attack to reduce the anonymity of the users of this extant anonymizing P2P network. This vulnerability is based on allowing paths of unrestricted length for onion routes through the network. Analyzing Tor and implementing this attack gave us valuable knowledge which helped when designing the distance vector routing protocol for our system. Finally, we present the design of our new secure randomized routing algorithm that does not suffer from the various problems we discovered in previous designs. Goals for the algorithm include providing efficiency and robustness in the presence of malicious participants for an open, fully decentralized network without trusted authorities. We provide a mathematical analysis of the algorithm itself and have created and deployed an implementation of this algorithm in GNUnet. In this thesis we also provide a detailed overview of a distributed emulation framework capable of running a large number of nodes using our full code base as well as some of the challenges encountered in creating and using such a testing framework. We present extensive experimental results showing that our routing algorithm outperforms the dominant DHT design in target topologies, and performs comparably in other scenarios},
  www_section = {distributed hash table, Freenet, GNUnet, NAT, R5N, Tor},
  isbn = {3-937201-26-2},
  issn = {1868-2642},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NET-2011-08-1.pdf},
  url = {https://bibliography.gnunet.org},
}
@article{2011_7,
  title = {Multi-objective optimization based privacy preserving distributed data mining in Peer-to-Peer networks},
  author = {Das, Kamalika and Bhaduri, Kanishka and Kargupta, Hillol},
  journal = {Peer-to-Peer Networking and Applications},
  volume = {4},
  year = {2011},
  pages = {192--209},
  abstract = {This paper proposes a scalable, local privacy-preserving algorithm for distributed Peer-to-Peer (P2P) data aggregation useful for many advanced data mining/analysis tasks such as average/sum computation, decision tree induction, feature selection, and more. Unlike most multi-party privacy-preserving data mining algorithms, this approach works in an asynchronous manner through local interactions and it is highly scalable. It particularly deals with the distributed computation of the sum of a set of numbers stored at different peers in a P2P network in the context of a P2P web mining application. The proposed optimization-based privacy-preserving technique for computing the sum allows different peers to specify different privacy requirements without having to adhere to a global set of parameters for the chosen privacy model. Since distributed sum computation is a frequently used primitive, the proposed approach is likely to have significant impact on many data mining tasks such as multi-party privacy-preserving clustering, frequent itemset mining, and statistical aggregate computation},
  www_section = {Data mining, peer-to-peer, Privacy preserving},
  issn = {1936-6442},
  doi = {10.1007/s12083-010-0075-1},
  url = {http://dx.doi.org/10.1007/s12083-010-0075-1},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Privacy_PPNA2011Das.pdf},
}
@mastersthesis{2011_8,
  title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A Real-World Case Study using I2P},
  author = {Michael Herrmann},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  year = {2011},
  month = mar,
  address = {Garching bei M{\"u}nchen},
  pages = {0--59},
  type = {M.S. thesis},
  abstract = {The Invisible Internet Project (I2P) is one of the most widely used anonymizing Peer-to-Peer networks on the Internet today. Like Tor, it uses onion routing to build tunnels between peers as the basis for providing anonymous communication channels. Unlike Tor, I2P integrates a range of anonymously hosted services directly with the platform. This thesis presents a new attack on the I2P Peer-to-Peer network, with the goal of determining the identity of peers that are anonymously hosting HTTP (Eepsite) services in the network. Key design choices made by I2P developers, in particular performance-based peer selection, enable a sophisticated adversary with modest resources to break key security assumptions. Our attack first obtains an estimate of the victim's view of the network. Then, the adversary selectively targets a small number of peers used by the victim with a denial-of-service attack while giving the victim the opportunity to replace those peers with other peers that are controlled by the adversary. Finally, the adversary performs some simple measurements to determine the identity of the peer hosting the service. This thesis provides the necessary background on I2P, gives details on the attack --- including experimental data from measurements against the actual I2P network --- and discusses possible solutions},
  www_section = {anonymity, attack, denial-of-service, I2P},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herrmann2011mt.pdf},
  url = {https://bibliography.gnunet.org},
}
@book{2011_9, title = {Private Similarity Computation in Distributed Systems: From Cryptography to Differential Privacy}, author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie}, booktitle = {Principles of Distributed Systems}, organization = {Springer Berlin Heidelberg}, volume = {7109}, year = {2011}, pages = {357--377}, editor = {Fern{\`a}ndez Anta, Antonio and Lipari, Giuseppe and Roy, Matthieu}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In this paper, we address the problem of computing the similarity between two users (according to their profiles) while preserving their privacy in a fully decentralized system and for the passive adversary model. First, we introduce a two-party protocol for privately computing a threshold version of the similarity and apply it to well-known similarity measures such as the scalar product and the cosine similarity. The output of this protocol is only one bit of information telling whether or not two users are similar beyond a predetermined threshold. Afterwards, we explore the computation of the exact and threshold similarity within the context of differential privacy. Differential privacy is a recent notion developed within the field of private data analysis guaranteeing that an adversary that observes the output of the differentially private mechanism, will only gain a negligible advantage (up to a privacy parameter) from the presence (or absence) of a particular item in the profile of a user. This provides a strong privacy guarantee that holds independently of the auxiliary knowledge that the adversary might have. More specifically, we design several differentially private variants of the exact and threshold protocols that rely on the addition of random noise tailored to the sensitivity of the considered similarity measure. We also analyze their complexity as well as their impact on the utility of the resulting similarity measure. 
Finally, we provide experimental results validating the effectiveness of the proposed approach on real datasets}, www_section = {Differential Privacy, homomorphic encryption, privacy, similarity measure}, isbn = {978-3-642-25872-5}, doi = {10.1007/978-3-642-25873-2_25}, url = {http://dx.doi.org/10.1007/978-3-642-25873-2_25}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateSimilarity2011Alaggan.pdf}, }
@book{2012_0, title = {BLIP: Non-interactive Differentially-Private Similarity Computation on Bloom filters}, author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie}, booktitle = {Stabilization, Safety, and Security of Distributed Systems}, organization = {Springer Berlin Heidelberg}, volume = {7596}, year = {2012}, pages = {202--216}, editor = {Richa, Andr{\'e}a W. and Scheideler, Christian}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In this paper, we consider the scenario in which the profile of a user is represented in a compact way, as a Bloom filter, and the main objective is to privately compute in a distributed manner the similarity between users by relying only on the Bloom filter representation. In particular, we aim at providing a high level of privacy with respect to the profile even if a potentially unbounded number of similarity computations take place, thus calling for a non-interactive mechanism. To achieve this, we propose a novel non-interactive differentially private mechanism called BLIP (for BLoom-and-flIP) for randomizing Bloom filters. This approach relies on a bit flipping mechanism and offers high privacy guarantees while maintaining a small communication cost. Another advantage of this non-interactive mechanism is that similarity computation can take place even when the user is offline, which is impossible to achieve with interactive mechanisms. Another of our contributions is the definition of a probabilistic inference attack, called the {\textquotedblleft}Profile Reconstruction attack{\textquotedblright}, that can be used to reconstruct the profile of an individual from his Bloom filter representation. 
More specifically, we provide an analysis of the protection offered by BLIP against this profile reconstruction attack by deriving an upper and lower bound for the required value of the differential privacy parameter {$\epsilon$}}, isbn = {978-3-642-33535-8}, doi = {10.1007/978-3-642-33536-5_20}, url = {http://dx.doi.org/10.1007/978-3-642-33536-5_20}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BLIP2012Alaggan.pdf}, www_section = {Unsorted}, }
@conference{2012_1, title = {CRISP: Collusion-resistant Incentive-compatible Routing and Forwarding in Opportunistic Networks}, author = {Sadiq, Umair and Kumar, Mohan and Wright, Matthew}, booktitle = {Proceedings of the 15th ACM International Conference on Modeling, Analysis and Simulation of Wireless and Mobile Systems}, organization = {ACM}, year = {2012}, address = {New York, NY, USA}, publisher = {ACM}, www_section = {black-hole attack, collusion, credit schemes, delay tolerant networks, flooding, incentive schemes, mobile peer-to-peer networks, opportunistic networks}, isbn = {978-1-4503-1628-6}, doi = {10.1145/2387238.2387253}, url = {http://doi.acm.org/10.1145/2387238.2387253}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crisp-mswim.pdf}, }
@conference{2012_10, title = {NTALG--TCP NAT traversal with application-level gateways}, author = {Wander, M. and Holzapfel, S. and Wacker, A. and Weis, T.}, booktitle = {Consumer Communications and Networking Conference (CCNC), 2012 IEEE}, year = {2012}, abstract = {Consumer computers or home communication devices are usually connected to the Internet via a Network Address Translation (NAT) router. This imposes restrictions for networking applications that require inbound connections. Existing solutions for NAT traversal can remedy the restrictions, but still there is a fraction of home users which lack support of it, especially when it comes to TCP. We present a framework for traversing NAT routers by exploiting their built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based applications. While this does not work in every scenario, it significantly improves the success chance without requiring any user interaction at all. To demonstrate the framework, we show a small test setup with laptop computers and home NAT routers}, www_section = {FTP-ALG, NAT}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WHW_12-NTALG.pdf}, url = {https://bibliography.gnunet.org}, }
@article{2012_11, title = {Octopus: A Secure and Anonymous DHT Lookup}, author = {Wang, Qiyan and Borisov, Nikita}, journal = {CoRR}, volume = {abs/1203.2668}, year = {2012}, www_section = {anonymity, distributed hash table}, url = {http://dblp.uni-trier.de/db/journals/corr/corr1203.html\#abs-1203-2668}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/octopus_dht.pdf}, }
@article{2012_12, title = {Personalization and privacy: a survey of privacy risks and remedies in personalization-based systems}, author = {Toch, Eran and Wang, Yang and Cranor, Lorrie Faith}, journal = {User Modeling and User-Adapted Interaction}, volume = {22}, year = {2012}, pages = {203--220}, abstract = {Personalization technologies offer powerful tools for enhancing the user experience in a wide variety of systems, but at the same time raise new privacy concerns. For example, systems that personalize advertisements according to the physical location of the user or according to the user's friends' search history, introduce new privacy risks that may discourage wide adoption of personalization technologies. This article analyzes the privacy risks associated with several current and prominent personalization trends, namely social-based personalization, behavioral profiling, and location-based personalization. We survey user attitudes towards privacy and personalization, as well as technologies that can help reduce privacy risks. We conclude with a discussion that frames risks and technical solutions in the intersection between personalization and privacy, as well as areas for further investigation. This frameworks can help designers and researchers to contextualize privacy challenges of solutions when designing personalization systems}, www_section = {e-commerce, Human--computer interaction, Location-based services, Personalization, privacy, social networks}, issn = {0924-1868}, doi = {10.1007/s11257-011-9110-z}, url = {http://dx.doi.org/10.1007/s11257-011-9110-z}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Personalization2012Toch.pdf}, }
@article{2012_13, title = {Saturn: Range Queries, Load Balancing and Fault Tolerance in DHT Data Systems}, author = {Theoni Pitoura and Nikos Ntarmos and Peter Triantafillou}, journal = {IEEE Transactions on Knowledge and Data Engineering}, volume = {24}, year = {2012}, month = {July}, chapter = {1313}, abstract = {In this paper, we present Saturn, an overlay architecture for large-scale data networks maintained over Distributed Hash Tables (DHTs) that efficiently processes range queries and ensures access load balancing and fault-tolerance. Placing consecutive data values in neighboring peers is desirable in DHTs since it accelerates range query processing; however, such a placement is highly susceptible to load imbalances. At the same time, DHTs may be susceptible to node departures/failures and high data availability and fault tolerance are significant issues. Saturn deals effectively with these problems through the introduction of a novel multiple ring, order-preserving architecture. The use of a novel order-preserving hash function ensures fast range query processing. Replication across and within data rings (termed vertical and horizontal replication) forms the foundation over which our mechanisms are developed, ensuring query load balancing and fault tolerance, respectively. Our detailed experimentation study shows strong gains in range query processing efficiency, access load balancing, and fault tolerance, with low replication overheads. 
The significance of Saturn is not only that it effectively tackles all three issues together{\textemdash}i.e., supporting range queries, ensuring load balancing, and providing fault tolerance over DHTs{\textemdash}but also that it can be applied on top of any order-preserving DHT enabling it to dynamically handle replication and, thus, to trade off replication costs for fair load distribution and fault tolerance}, www_section = {distributed hash table, load balancing, range queries, Saturn}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saturn-range-dht.pdf}, url = {https://bibliography.gnunet.org}, }
@article{2012_14, title = {The state-of-the-art in personalized recommender systems for social networking}, author = {Zhou, Xujuan and Xu, Yue and Li, Yuefeng and Josang, Audun and Cox, Clive}, journal = {Artificial Intelligence Review}, volume = {37}, year = {2012}, pages = {119--132}, abstract = {With the explosion of Web 2.0 application such as blogs, social and professional networks, and various other types of social media, the rich online information and various new sources of knowledge flood users and hence pose a great challenge in terms of information overload. It is critical to use intelligent agent software systems to assist users in finding the right information from an abundance of Web data. Recommender systems can help users deal with information overload problem efficiently by suggesting items (e.g., information and products) that match users' personal interests. The recommender technology has been successfully employed in many applications such as recommending films, music, books, etc. The purpose of this report is to give an overview of existing technologies for building personalized recommender systems in social networking environment, to propose a research direction for addressing user profiling and cold start problems by exploiting user-generated content newly available in Web 2.0}, www_section = {recommender systems, Social networking, trust, User generated content, user profiles}, issn = {0269-2821}, doi = {10.1007/s10462-011-9222-1}, url = {http://dx.doi.org/10.1007/s10462-011-9222-1}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedRecommender2012Zhou.pdf}, }
@article{2012_15, title = {A Survey of Monte Carlo Tree Search Methods}, author = {Cameron Browne and Edward Powley and Daniel Whitehouse and Simon Lucas and Peter I. Cowling and Philipp Rohlfshagen and Stephen Tavener and Diego Perez and Spyridon Samothrakis and Simon Colton}, journal = {IEEE Transactions on Computational Intelligence and AI in Games}, volume = {4}, year = {2012}, month = {March}, pages = {1--43}, abstract = {Monte Carlo tree search (MCTS) is a recently proposed search method that combines the precision of tree search with the generality of random sampling. It has received considerable interest due to its spectacular success in the difficult problem of computer Go, but has also proved beneficial in a range of other domains. This paper is a survey of the literature to date, intended to provide a snapshot of the state of the art after the first five years of MCTS research. We outline the core algorithm's derivation, impart some structure on the many variations and enhancements that have been proposed, and summarize the results from the key game and nongame domains to which MCTS methods have been applied. A number of open research questions indicate that the field is ripe for future work}, www_section = {AI, artificial intelligence, bandit-based methods, computer go., game search, MCTS, monte carlo tree search, UCB, UCT, upper confidence bounds, upper confidence bounds for trees}, issn = {1943-068X}, doi = {10.1109/TCIAIG.2012.2186810}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Browne\%20et\%20al\%20-\%20A\%20survey\%20of\%20MCTS\%20methods.pdf}, url = {https://bibliography.gnunet.org}, }
@article{2012_16, title = {Theory and Practice of Bloom Filters for Distributed Systems}, author = {Tarkoma, S. and Rothenberg, C.E. and Lagerspetz, E.}, journal = {Communications Surveys Tutorials, IEEE}, volume = {14}, year = {2012}, month = {January}, pages = {131--155}, abstract = {Many network solutions and overlay networks utilize probabilistic techniques to reduce information processing and networking costs. This survey article presents a number of frequently used and useful probabilistic techniques. Bloom filters and their variants are of prime importance, and they are heavily used in various distributed systems. This has been reflected in recent research and many new algorithms have been proposed for distributed systems that are either directly or indirectly based on Bloom filters. In this survey, we give an overview of the basic and advanced techniques, reviewing over 20 variants and discussing their application in distributed systems, in particular for caching, peer-to-peer systems, routing and forwarding, and measurement data summarization}, www_section = {Unsorted}, issn = {1553-877X}, doi = {10.1109/SURV.2011.031611.00024}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TheoryandPracticeBloomFilter2011Tarkoma.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{2012_17, title = {User Interests Driven Web Personalization Based on Multiple Social Networks}, author = {Zeng, Yi and Zhong, Ning and Ren, Xu and Wang, Yan}, booktitle = {Proceedings of the 4th International Workshop on Web Intelligence \& Communities}, organization = {ACM}, year = {2012}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {User related data indicate user interests in a certain environment. In the context of massive data from the Web, if an application wants to provide more personalized service (e.g. search) for users, an investigation on user interests is needed. User interests are usually distributed in different sources. In order to provide a more comprehensive understanding, user related data from multiple sources need to be integrated together for deeper analysis. Web based social networks have become typical platforms for extracting user interests. In addition, there are various types of interests from these social networks. In this paper, we provide an algorithmic framework for retrieving semantic data based on user interests from multiple sources (such as multiple social networking sites). We design several algorithms to deal with interests based retrieval based on single and multiple types of interests. We utilize publication data from Semantic Web Dog Food (which can be considered as an academic collaboration based social network), and microblogging data from Twitter to validate our framework. The Active Academic Visit Recommendation Application (AAVRA) is developed as a concrete usecase to show the potential effectiveness of the proposed framework for user interests driven Web personalization based on multiple social networks}, www_section = {interest analysis, search refinement, web personalization}, isbn = {978-1-4503-1189-2}, doi = {10.1145/2189736.2189749}, url = {http://doi.acm.org/10.1145/2189736.2189749}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WebPersonalization2012Zeng.pdf}, }
@mastersthesis{2012_2, title = {Decentralized Evaluation of Regular Expressions for Capability Discovery in Peer-to-Peer Networks}, author = {Maximilian Szengel}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {M.S}, year = {2012}, month = {November}, address = {Garching bei M{\"u}nchen}, pages = {0--100}, type = {Masters}, abstract = {This thesis presents a novel approach for decentralized evaluation of regular expressions for capability discovery in DHT-based overlays. The system provides support for announcing capabilities expressed as regular expressions and discovering participants offering adequate capabilities. The idea behind our approach is to convert regular expressions into finite automatons and store the corresponding states and transitions in a DHT. We show how locally constructed DFA are merged in the DHT into an NFA without the knowledge of any NFA already present in the DHT and without the need for any central authority. Furthermore we present options of optimizing the DFA. There exist several possible applications for this general approach of decentralized regular expression evaluation. However, in this thesis we focus on the application of discovering users that are willing to provide network access using a specified protocol to a particular destination. We have implemented the system for our proposed approach and conducted a simulation. Moreover we present the results of an emulation of the implemented system in a cluster}, www_section = {DFA, distributed hash table, GNUnet, NFA, regular expressions, search}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/szengel2012ms.pdf}, url = {https://bibliography.gnunet.org}, }
@mastersthesis{2012_3, title = {Design and Implementation of a Censorship Resistant and Fully Decentralized Name System}, author = {Martin Schanzenbach}, school = {TU Munich}, volume = {M.Sc}, year = {2012}, month = {September}, address = {Garching bei M{\"u}nchen}, pages = {0--116}, type = {Master's}, abstract = {This thesis presents the design and implementation of the GNU Alternative Domain System (GADS), a decentralized, secure name system providing memorable names for the Internet as an alternative to the Domain Name System (DNS). The system builds on ideas from Rivest's Simple Distributed Security Infrastructure (SDSI) to address a central issue with providing a decentralized mapping of secure identifiers to memorable names: providing a global, secure and memorable mapping is impossible without a trusted authority. SDSI offers an alternative by linking local name spaces; GADS uses the transitivity provided by the SDSI design to build a decentralized and censorship resistant name system without a trusted root based on secure delegation of authority. Additional details need to be considered in order to enable GADS to integrate smoothly with the World Wide Web. While following links on the Web matches following delegations in GADS, the existing HTTP-based infrastructure makes many assumptions about globally unique names; however, proxies can be used to enable legacy applications to function with GADS. This work presents the fundamental goals and ideas behind GADS, provides technical details on how GADS has been implemented and discusses deployment issues for using GADS with existing systems. We discuss how GADS and legacy DNS can interoperate during a transition period and what additional security advantages GADS offers over DNS with Security Extensions (DNSSEC). 
Finally, we present the results of a survey into surfing behavior, which suggests that the manual introduction of new direct links in GADS will be infrequent}, www_section = {censorship resistance, decentralized, DNS, GNU Name System, GNUnet}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/schanzen2012msc.pdf}, url = {https://bibliography.gnunet.org}, }
@book{2012_4, title = {Differential Privacy with Imperfect Randomness}, author = {Dodis, Yevgeniy and L{\'o}pez-Alt, Adriana and Mironov, Ilya and Vadhan, Salil}, booktitle = {Advances in Cryptology -- CRYPTO 2012}, organization = {Springer Berlin Heidelberg}, volume = {7417}, year = {2012}, pages = {497--516}, editor = {Safavi-Naini, Reihaneh and Canetti, Ran}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In this work we revisit the question of basing cryptography on imperfect randomness. Bosley and Dodis (TCC'07) showed that if a source of randomness R is {\textquotedblleft}good enough{\textquotedblright} to generate a secret key capable of encrypting k bits, then one can deterministically extract nearly k almost uniform bits from R, suggesting that traditional privacy notions (namely, indistinguishability of encryption) requires an {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other, even stronger impossibility results are known for achieving privacy under specific {\textquotedblleft}non-extractable{\textquotedblright} sources of randomness, such as the {\gamma}-Santha-Vazirani (SV) source, where each next bit has fresh entropy, but is allowed to have a small bias {\gamma} < 1 (possibly depending on prior bits). We ask whether similar negative results also hold for a more recent notion of privacy called differential privacy (Dwork et al., TCC'06), concentrating, in particular, on achieving differential privacy with the Santha-Vazirani source. We show that the answer is no. Specifically, we give a differentially private mechanism for approximating arbitrary {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even with randomness coming from a {\gamma}-Santha-Vazirani source, for any {\gamma} < 1. 
This provides a somewhat surprising {\textquotedblleft}separation{\textquotedblright} between traditional privacy and differential privacy with respect to imperfect randomness. Interestingly, the design of our mechanism is quite different from the traditional {\textquotedblleft}additive-noise{\textquotedblright} mechanisms (e.g., Laplace mechanism) successfully utilized to achieve differential privacy with perfect randomness. Indeed, we show that any (non-trivial) {\textquotedblleft}SV-robust{\textquotedblright} mechanism for our problem requires a demanding property called consistent sampling, which is strictly stronger than differential privacy, and cannot be satisfied by any additive-noise mechanism}, isbn = {978-3-642-32008-8}, doi = {10.1007/978-3-642-32009-5_29}, url = {http://dx.doi.org/10.1007/978-3-642-32009-5_29}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DPwithImperfectRandomness2012Dodis.pdf}, www_section = {Unsorted}, }
@techreport{2012_5, title = {Efficient and Secure Decentralized Network Size Estimation}, author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}, institution = {Technische Universit{\"a}t M{\"u}nchen}, year = {2012}, month = {May}, address = {Garching bei M{\"u}nchen}, abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for performance tuning of P2P routing algorithms. This paper introduces and evaluates a new efficient method for participants in an unstructured P2P network to establish the size of the overall network. The presented method is highly efficient, propagating information about the current size of the network to all participants using O(|E|) operations where |E| is the number of edges in the network. Afterwards, all nodes have the same network size estimate, which can be made arbitrarily accurate by averaging results from multiple rounds of the protocol. Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates. This paper includes experimental results that demonstrate the viability, efficiency and accuracy of the protocol}, www_section = {GNUnet, network security, network size estimation, peer-to-peer networking}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nse-techreport.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{2012_6, title = {Efficient and Secure Decentralized Network Size Estimation}, author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}, booktitle = {IFIP International Conferences on Networking (Networking 2012)}, organization = {Springer Verlag}, year = {2012}, month = {May}, address = {Prague, CZ}, pages = {304--317}, publisher = {Springer Verlag}, abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for performance tuning of P2P routing algorithms. This paper introduces and evaluates a new efficient method for participants in an unstructured P2P network to establish the size of the overall network. The presented method is highly efficient, propagating information about the current size of the network to all participants using O(|E|) operations where |E| is the number of edges in the network. Afterwards, all nodes have the same network size estimate, which can be made arbitrarily accurate by averaging results from multiple rounds of the protocol. Security measures are included which make it prohibitively expensive for a typical active participating adversary to significantly manipulate the estimates. This paper includes experimental results that demonstrate the viability, efficiency and accuracy of the protocol}, www_section = {byzantine fault tolerance, GNUnet, network size estimation, proof of work}, url = {http://grothoff.org/christian/rrsize2012.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-ifip.pdf}, }
@conference{2012_7, title = {Koi: A Location-Privacy Platform for Smartphone Apps}, author = {Saikat Guha and Mudit Jain and Venkata Padmanabhan}, booktitle = {Proceedings of the 9th Symposium on Networked Systems Design and Implementation (NSDI)}, year = {2012}, month = {April}, address = {San Jose, CA}, abstract = {With mobile phones becoming first-class citizens in the online world, the rich location data they bring to the table is set to revolutionize all aspects of online life including content delivery, recommendation systems, and advertising. However, user-tracking is a concern with such location-based services, not only because location data can be linked uniquely to individuals, but because the low-level nature of current location APIs and the resulting dependence on the cloud to synthesize useful representations virtually guarantees such tracking. In this paper, we propose privacy-preserving location-based matching as a fundamental platform primitive and as an alternative to exposing low-level, latitude-longitude (lat-long) coordinates to applications. Applications set rich location-based triggers and have these be fired based on location updates either from the local device or from a remote device (e.g., a friend's phone). Our Koi platform, comprising a privacy-preserving matching service in the cloud and a phone-based agent, realizes this primitive across multiple phone and browser platforms. By masking low-level lat-long information from applications, Koi not only avoids leaking privacy-sensitive information, it also eases the task of programmers by providing a higher-level abstraction that is easier for applications to build upon. Koi's privacy-preserving protocol prevents the cloud service from tracking users. 
We verify the non-tracking properties of Koi using a theorem prover, illustrate how privacy guarantees can easily be added to a wide range of location-based applications, and show that our public deployment is performant, being able to perform 12K matches per second on a single core}, www_section = {location privacy, matching}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nsdi12-koi.pdf}, url = {https://bibliography.gnunet.org}, }
@book{2012_8, title = {Lower Bounds in Differential Privacy}, author = {De, Anindya}, booktitle = {Theory of Cryptography}, organization = {Springer Berlin Heidelberg}, volume = {7194}, year = {2012}, pages = {321--338}, editor = {Cramer, Ronald}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {This paper is about private data analysis, in which a trusted curator holding a confidential database responds to real vector-valued queries. A common approach to ensuring privacy for the database elements is to add appropriately generated random noise to the answers, releasing only these noisy responses. A line of study initiated in [7] examines the amount of distortion needed to prevent privacy violations of various kinds. The results in the literature vary according to several parameters, including the size of the database, the size of the universe from which data elements are drawn, the {\textquotedblleft}amount{\textquotedblright} of privacy desired, and for the purposes of the current work, the arity of the query. In this paper we sharpen and unify these bounds. Our foremost result combines the techniques of Hardt and Talwar [11] and McGregor et al. [13] to obtain linear lower bounds on distortion when providing differential privacy for a (contrived) class of low-sensitivity queries. (A query has low sensitivity if the data of a single individual has small effect on the answer.) 
Several structural results follow as immediate corollaries: We separate so-called counting queries from arbitrary low-sensitivity queries, proving the latter requires more noise, or distortion, than does the former; We separate ({\epsilon},0)-differential privacy from its well-studied relaxation ({\epsilon},{\delta})-differential privacy, even when {\delta} {\epsilon} 2- o(n) is negligible in the size n of the database, proving the latter requires less distortion than the former; We demonstrate that ({\epsilon},{\delta})-differential privacy is much weaker than ({\epsilon},0)-differential privacy in terms of mutual information of the transcript of the mechanism with the database, even when {\delta} {\epsilon} 2- o(n) is negligible in the size n of the database. We also simplify the lower bounds on noise for counting queries in [11] and also make them unconditional. Further, we use a characterization of ({\epsilon},{\delta}) differential privacy from [13] to obtain lower bounds on the distortion needed to ensure ({\epsilon},{\delta})-differential privacy for {\epsilon},{\delta} > 0. We next revisit the LP decoding argument of [10] and combine it with a recent result of Rudelson [15] to improve on a result of Kasiviswanathan et al. [12] on noise lower bounds for privately releasing l-way marginals}, www_section = {Differential Privacy, LP decoding}, isbn = {978-3-642-28913-2}, doi = {10.1007/978-3-642-28914-9_18}, url = {http://dx.doi.org/10.1007/978-3-642-28914-9_18}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LowerBoundsDP2012De.pdf}, }
@mastersthesis{2012_9, title = {Monkey: Automated debugging of deployed distributed systems}, author = {Safey A. Halim}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {M.S}, year = {2012}, month = {July}, address = {Garching bei M{\"u}nchen}, pages = {0--78}, type = {Masters}, abstract = {Debugging is tedious and time consuming work that, for certain types of bugs, can and should be automated. Debugging distributed systems is more complex due to time dependencies between interacting processes. Another related problem is duplicate bug reports in bug repositories. Finding bug duplicates is hard and wastes developers' time which may affect the development team's rate of bug fixes and new releases. In this master thesis we introduce Monkey, a new tool that provides a solution for automated classification, investigation and characterization of bugs, as well as a solution for comparing bug reports and avoiding duplicates. Our tool is particularly suitable for distributed systems due to its autonomy. We present Monkey's key design goals and architecture and give experimental results demonstrating the viability of our approach}, www_section = {automation, debugging, distributed systems}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/safey-thesis-monkey.pdf , https://git.gnunet.org/bibliography.git/plain/docs/safey-presentation-monkey.pdf}, url = {https://bibliography.gnunet.org}, }
% NOTE(review): month now uses the standard BibTeX macro (jun) instead of the literal string {June}.
@mastersthesis{2013_1, title = {Large Scale Distributed Evaluation of Peer-to-Peer Protocols}, author = {Totakura, Sree Harsha}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {Master of Science}, year = {2013}, month = jun, address = {Garching bei M{\"u}nchen}, pages = {0--76}, type = {Masters}, abstract = {Evaluations of P2P protocols during the system's design and implementation phases are commonly done through simulation and emulation respectively. While the current state-of-the-art simulation allows evaluations with many millions of peers through the use of abstractions, emulation still lags behind as it involves executing the real implementation at some parts of the system. This difference in scales can make it hard to relate the evaluations made created with simulation and emulation during the design and implementation phases and can results in a limited evaluation of the implementation, which may cause severe problems after deployment. In this thesis, we build upon an existing emulator for P2P applications to push the scales offered by emulation towards the limits set by simulation. Our approach distributes and co-ordinates the emulation across many hosts. Large deployments are possible by deploying hundreds or thousands of peers on each host. To address the varying needs of an experimenter and the range of available hardware, we make our approach scalable such that it can easily be adapted to run evaluations on a single machine or a large group of hosts. Specifically, the system automatically adjusts the number of overlapping operations to the available resources efficiently using a feedback mechanism, thus relieving the experimenter from the hassles of manual tuning. 
We specifically target HPC systems like compute clusters and supercomputers and demonstrate how such systems can be used for large scale emulations by evaluating two P2P applications with deployment sizes up to 90k peers on a supercomputer}, www_section = {Unsorted}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thesis_lowres.pdf , https://git.gnunet.org/bibliography.git/plain/docs/thesis.pdf}, url = {https://bibliography.gnunet.org}, }
@mastersthesis{2013_2, title = {Monkey--Generating Useful Bug Reports Automatically}, author = {Markus Teich}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {Bachelor}, year = {2013}, month = {July}, address = {Munich}, pages = {0--50}, type = {Bachelor Thesis}, abstract = {Automatic crash handlers support software developers in finding bugs and fixing the problems in their code. Most of them behave similarly in providing the developer with a (symbolic) stack trace and a memory dump of the crashed application. This introduces some problems that we try to fix with our proposed automatic bug reporting system called "Monkey". In this paper we describe the problems that occur when debugging widely distributed systems and how Monkey handles them. First, we describe our Motivation for develop- ing the Monkey system. Afterwards we present the most common existing automatic crash handlers and how they work. Thirdly you will get an overview of the Monkey system and its components. In the fourth chapter we will analyze one report gener- ated by Monkey, evaluate an online experiment we conducted and present some of our finding during the development of the clustering algorithm used to categorize crash reports. Last, we discuss some of Monkeys features and compare them to the existing approaches. Also some ideas for the future development of the Monkey system are presented before we conclude that Monkey's approach is promising, but some work is still left to establish Monkey in the open source community}, www_section = {Unsorted}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main_0.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{2013_3, title = {Persea: A Sybil-resistant Social DHT}, author = {Al-Ameen, Mahdi N. and Matthew Wright}, booktitle = {Proceedings of the Third ACM Conference on Data and Application Security and Privacy}, organization = {ACM}, year = {2013}, address = {New York, NY, USA}, publisher = {ACM}, abstract = {P2P systems are inherently vulnerable to Sybil attacks, in which an attacker can have a large number of identities and use them to control a substantial fraction of the system. We propose Persea, a novel P2P system that is more robust against Sybil attacks than prior approaches. Persea derives its Sybil resistance by assigning IDs through a bootstrap tree, the graph of how nodes have joined the system through invitations. More specifically, a node joins Persea when it gets an invitation from an existing node in the system. The inviting node assigns a node ID to the joining node and gives it a chunk of node IDs for further distribution. For each chunk of ID space, the attacker needs to socially engineer a connection to another node already in the system. This hierarchical distribution of node IDs confines a large attacker botnet to a considerably smaller region of the ID space than in a normal P2P system. Persea uses a replication mechanism in which each (key,value) pair is stored in nodes that are evenly spaced over the network. Thus, even if a given region is occupied by attackers, the desired (key,value) pair can be retrieved from other regions. We compare our results with Kad, Whanau, and X-Vine and show that Persea is a better solution against Sybil attacks. collapse}, www_section = {security, social dht, Sybil attack}, isbn = {978-1-4503-1890-7}, doi = {10.1145/2435349.2435372}, url = {http://doi.acm.org.eaccess.ub.tum.de/10.1145/2435349.2435372}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p169-al-ameen.pdf}, }
@article{2013_4, title = {Public Key Pinning for TLS Using a Trust on First Use Model}, author = {Gabor X Toth}, journal = {unknown}, year = {2013}, editor = {Tjebbe Vlieg}, abstract = {Although the Public Key Infrastructure (PKI) using X.509 is meant to prevent the occurrence of man-in-the-middle attacks on TLS, there are still situations in which such attacks are possible due to the large number of Certification Authorities (CA) that has to be trusted. Recent incidents involving CA compromises, which lead to issuance of rogue certificates indicate the weakness of the PKI model. Recently various public key pinning protocols -- such as DANE or TACK -- have been proposed to thwart man-in-the-middle attacks on TLS connections. It will take a longer time, however, until any of these protocols reach wide deployment. We present an approach intended as an interim solution to bridge this gap and provide protection for connections to servers not yet using a pinning protocol. The presented method is based on public key pinning with a trust on first use model, and can be combined with existing notary approaches as well}, www_section = {Unsorted}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tofu-pinning.pdf}, url = {https://bibliography.gnunet.org}, }
@mastersthesis{2013_5, title = {Speeding Up Tor with SPDY}, author = {Andrey Uzunov}, school = {Technische Universit{\"a}t M{\"u}nchen}, volume = {Master's in Computer Science}, year = {2013}, month = {November}, address = {Garching bei M{\"u}nchen}, pages = {0--124}, type = {Master's}, abstract = {SPDY is a rather new protocol which is an alternative to HTTP. It was designed to address inefficiencies in the latter and thereby improve latency and reduce bandwidth consumption. This thesis presents the design and implementation of a setup for utilizing SPDY within the anonymizing Tor network for reducing latency and traffic in the latter. A C library implementing the SPDY server protocol is introduced together with an HTTP to SPDY and a SPDY to HTTP proxy which are the base for the presented design. Furthermore, we focus on the SPDY server push feature which allows servers to send multiple responses to a single request for reducing latency and traffic on loading web pages. We propose a prediction algorithm for employing push at SPDY servers and proxies. The algorithm makes predictions based on previous requests and responses and initially does not know anything about the data which it will push. This thesis includes extensive measurement data highlighting the possible benefits of using SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks experiencing latency or loss. Moreover, the real profit from using SPDY within the Tor network on loading some of the most popular web sites is presented. Finally, evaluations of the proposed push prediction algorithm are given for emphasizing the possible gain of employing it at SPDY reverse and forward proxies}, www_section = {Unsorted}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/uzunov2013torspdy.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{2013_6, title = {Trawling for Tor Hidden Services: Detection, Measurement, Deanonymization}, author = {Biryukov, A. and Pustogarov, I. and Weinmann, R.}, booktitle = {Security and Privacy (SP), 2013 IEEE Symposium on}, year = {2013}, www_section = {Unsorted}, doi = {10.1109/SP.2013.15}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trawling_for_tor_HS.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{2013_7, title = {WhatsUp: A Decentralized Instant News Recommender}, author = {Antoine Boutet and Davide Frey and Rachid Guerraoui and Arnaud Jegou and Anne-Marie Kermarrec}, booktitle = {IEEE 27th International Symposium on Parallel & Distributed Processing}, organization = {IEEE}, year = {2013}, publisher = {IEEE}, abstract = {We present WHATSUP, a collaborative filtering system for disseminating news items in a large-scale dynamic setting with no central authority. WHATSUP constructs an implicit social network based on user profiles that express the opinions of users about the news items they receive (like-dislike). Users with similar tastes are clustered using a similarity metric reflecting long-standing and emerging (dis)interests. News items are disseminated through a novel heterogeneous gossip protocol that (1) biases the orientation of its targets towards those with similar interests, and (2) amplifies dissemination based on the level of interest in every news item. We report on an extensive evaluation of WHATSUP through (a) simulations, (b) a ModelNet emulation on a cluster, and (c) a PlanetLab deployment based on real datasets. We show that WHATSUP outperforms various alternatives in terms of accurate and complete delivery of relevant news items while preserving the fundamental advantages of standard gossip: namely, simplicity of deployment and robustness}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/whatsup.pdf}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@conference{2016, title = {Managing and Presenting User Attributes over a Decentralized Secure Name System}, author = {Martin Schanzenbach and Christian Banse}, booktitle = {Data Privacy Management and Security Assurance--11th International Workshop, {DPM} 2016 and 5th International Workshop, {QASA} 2016, Heraklion, Crete, Greece, September 26-27, 2016, Proceedings}, organization = {Springer}, year = {2016}, month = {September}, address = {Crete, Greece}, publisher = {Springer}, abstract = {Today, user attributes are managed at centralized identity providers. However, two centralized identity providers dominate digital identity and access management on the web. This is increasingly becoming a privacy problem in times of mass surveillance and data mining for targeted advertisement. Existing systems for attribute sharing or credential presentation either rely on a trusted third party service or require the presentation to be online and synchronous. In this paper we propose a concept that allows the user to manage and share his attributes asynchronously with a requesting party using a secure, decentralized name system}, www_section = {Decentralisation, GNUnet, Identity and Access Management, User Attributes}, www_tags = {selected}, url = {https://bibliography.gnunet.org}, }
@mastersthesis{2017_0, title = {The GNUnet System}, author = {Grothoff, Christian}, school = {Universit{\'e} de Rennes 1}, volume = {HDR}, year = {2017}, month = {December}, address = {Rennes}, pages = {0--181}, type = {Habilitation {\`a} diriger des recherches}, abstract = {GNUnet is an alternative network stack for building secure, decentralized and privacy-preserving distributed applications. Our goal is to replace the old insecure Internet protocol stack. Starting from an application for secure publication of files, it has grown to include all kinds of basic protocol components and applications towards the creation of a GNU internet. This habilitation provides an overview of the GNUnet architecture, including the development process, the network architecture and the software architecture. The goal of Part 1 is to provide an overview of how the various parts of the project work together today, and to then give ideas for future directions. The text is a first attempt to provide this kind of synthesis, and in return does not go into extensive technical depth on any particular topic. Part 2 then gives selected technical details based on eight publications covering many of the core components. This is a harsh selection; on the GNUnet website there are more than 50 published research papers and theses related to GNUnet, providing extensive and in-depth documentation. Finally, Part 3 gives an overview of current plans and future work}, keywords = {decentralization, GNUnet, peer-to-peer, privacy, private information retrieval, routing, secure multiparty computation, self-organization}, www_section = {decentralization, GNUnet, peer-to-peer, privacy, private information retrieval, routing, secure multiparty computation, self-organization}, www_tags = {selected}, doi = {https://hal.inria.fr/tel-01654244}, url = {https://grothoff.org/christian/habil.pdf}, }
@article{2018_0, title = {Toward secure name resolution on the internet}, author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}, journal = {Computers & Security}, year = {2018}, abstract = {The Domain Name System (DNS) provides crucial name resolution functions for most Internet services. As a result, DNS traffic provides an important attack vector for mass surveillance, as demonstrated by the QUANTUMDNS and MORECOWBELL programs of the NSA. This article reviews how DNS works and describes security considerations for next generation name resolution systems. We then describe DNS variations and analyze their impact on security and privacy. We also consider Namecoin, the GNU Name System and RAINS, which are more radical re-designs of name systems in that they both radically change the wire protocol and also eliminate the existing global consensus on TLDs provided by ICANN. Finally, we assess how the different systems stack up with respect to the goal of improving security and privacy of name resolution for the future Internet}, keywords = {Future Internet, GNUnet, Name resolution, network architecture, privacy, Technology and society}, www_section = {Future Internet, GNUnet, Name resolution, network architecture, privacy, Technology and society}, issn = {0167-4048}, doi = {https://doi.org/10.1016/j.cose.2018.01.018}, url = {http://www.sciencedirect.com/science/article/pii/S0167404818300403}, www_tags = {selected}, }
@inproceedings{2018_1, title = {reclaimID: Secure, Self-Sovereign Identities using Name Systems and Attribute-Based Encryption}, author = {Schanzenbach, M. and Bramm, G. and Sch{\"u}tte, J.}, booktitle = {Proceedings of 17th IEEE International Conference On Trust, Security And Privacy In Computing And Communications/ 12th IEEE International Conference On Big Data Science And Engineering (TrustCom/BigDataSE)}, year = {2018}, abstract = {In this paper we present reclaimID: An architecture that allows users to reclaim their digital identities by securely sharing identity attributes without the need for a centralised service provider. We propose a design where user attributes are stored in and shared over a name system under user-owned namespaces. Attributes are encrypted using attribute-based encryption (ABE), allowing the user to selectively authorize and revoke access of requesting parties to subsets of his attributes. We present an implementation based on the decentralised GNU Name System (GNS) in combination with ciphertext-policy ABE using type-1 pairings. To show the practicality of our implementation, we carried out experimental evaluations of selected implementation aspects including attribute resolution performance. Finally, we show that our design can be used as a standard OpenID Connect Identity Provider allowing our implementation to be integrated into standard-compliant services}, keywords = {Computer Science - Cryptography and Security}, www_section = {Computer Science - Cryptography and Security}, url = {https://arxiv.org/abs/1805.06253v1}, www_tags = {selected}, }
@article{214121, title = {Impossibility of distributed consensus with one faulty process}, author = {Fischer, Michael J. and Lynch, Nancy A. and Paterson, Michael S.}, journal = {J. ACM}, volume = {32}, number = {2}, year = {1985}, address = {New York, NY, USA}, pages = {374--382}, publisher = {ACM}, abstract = {The consensus problem involves an asynchronous system of processes, some of which may be unreliable. The problem is for the reliable processes to agree on a binary value. In this paper, it is shown that every protocol for this problem has the possibility of nontermination, even with only one faulty process. By way of contrast, solutions are known for the synchronous case, the {\textquotedblleft}Byzantine Generals{\textquotedblright} problem}, issn = {0004-5411}, doi = {10.1145/3149.214121}, url = {http://portal.acm.org/citation.cfm?id=214121$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pods06_paper01.pdf}, www_section = {Unsorted}, }
@article{224068, title = {Exploiting weak connectivity for mobile file access}, author = {Lily B. Mummert and Maria Ebling and Satyanarayanan, Mahadev}, journal = {SIGOPS Oper. Syst. Rev}, volume = {29}, number = {5}, year = {1995}, address = {New York, NY, USA}, pages = {143--155}, publisher = {ACM}, issn = {0163-5980}, doi = {10.1145/224057.224068}, url = {http://portal.acm.org/citation.cfm?id=224068$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/s15.pdf}, www_section = {Unsorted}, }
@conference{285258, title = {A digital fountain approach to reliable distribution of bulk data}, author = {Byers, John W. and Luby, Michael and Michael Mitzenmacher and Rege, Ashutosh}, booktitle = {SIGCOMM'98: Proceedings of SIGCOMM'98 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communication}, organization = {ACM}, year = {1998}, month = {September}, address = {Vancouver, Canada}, pages = {56--67}, publisher = {ACM}, abstract = {The proliferation of applications that must reliably distribute bulk data to a large number of autonomous clients motivates the design of new multicast and broadcast protocols. We describe an ideal, fully scalable protocol for these applications that we call a digital fountain. A digital fountain allows any number of heterogeneous clients to acquire bulk data with optimal efficiency at times of their choosing. Moreover, no feedback channels are needed to ensure reliable delivery, even in the face of high loss rates.We develop a protocol that closely approximates a digital fountain using a new class of erasure codes that for large block sizes are orders of magnitude faster than standard erasure codes. We provide performance measurements that demonstrate the feasibility of our approach and discuss the design, implementation and performance of an experimental system}, www_section = {coding theory, multicast}, isbn = {1-58113-003-1}, doi = {10.1145/285237.285258}, url = {http://portal.acm.org/citation.cfm?id=285258\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.3011.pdf}, }
@conference{301333, title = {Flash mixing}, author = {Jakobsson, Markus}, booktitle = {PODC '99: Proceedings of the eighteenth annual ACM symposium on Principles of distributed computing}, organization = {ACM}, year = {1999}, address = {New York, NY, USA}, pages = {83--89}, publisher = {ACM}, isbn = {1-58113-099-6}, doi = {http://doi.acm.org/10.1145/301308.301333}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/flash-mix.pdf}, url = {https://bibliography.gnunet.org}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{313556, title = {Next century challenges: scalable coordination in sensor networks}, author = {Deborah Estrin and Govindan, Ramesh and Heidemann, John and Kumar, Satish}, booktitle = {MobiCom '99: Proceedings of the 5th annual ACM/IEEE international conference on Mobile computing and networking}, organization = {ACM}, year = {1999}, address = {New York, NY, USA}, pages = {263--270}, publisher = {ACM}, abstract = {Networked sensors -- those that coordinate amongst themselves to achieve a larger sensing task -- will revolutionize information gathering and processing both in urban environments and in inhospitable terrain. The sheer numbers of these sensors and the expected dynamics in these environments present unique challenges in the design of unattended autonomous sensor networks. These challenges lead us to hypothesize that sensor network coordination applications may need to be structured differently from traditional network applications. In particular, we believe that localized algorithms (in which simple local node behavior achieves a desired global objective) may be necessary for sensor network coordination. In this paper, we describe localized algorithms, and then discuss directed diffusion, a simple communication model for describing localized algorithms}, www_section = {sensor networks}, isbn = {1-58113-142-9}, doi = {10.1145/313451.313556}, url = {http://portal.acm.org/citation.cfm?id=313451.313556$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2867.pdf}, }
@conference{314722, title = {Analysis of random processes via And-Or tree evaluation}, author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi}, booktitle = {SODA '98: Proceedings of the ninth annual ACM-SIAM symposium on Discrete algorithms}, organization = {Society for Industrial and Applied Mathematics}, year = {1998}, address = {Philadelphia, PA, USA}, pages = {364--373}, publisher = {Society for Industrial and Applied Mathematics}, abstract = {We introduce a new set of probabilistic analysis tools based on the analysis of And-Or trees with random inputs. These tools provide a unifying, intuitive, and powerful framework for carrying out the analysis of several previously studied random processes of interest, including random loss-resilient codes, solving random k-SAT formula using the pure literal rule, and the greedy algorithm for matchings in random graphs. In addition, these tools allow generalizations of these problems not previously analyzed to be analyzed in a straightforward manner. We illustrate our methodology on the three problems listed above. 1 Introduction We introduce a new set of probabilistic analysis tools related to the amplification method introduced by [12] and further developed and used in [13, 5]. These tools provide a unifying, intuitive, and powerful framework for carrying out the analysis of several previously studied random processes of interest, including the random loss-resilient codes introduced}, www_section = {And-Or trees, coding theory}, isbn = {0-89871-410-9}, url = {http://portal.acm.org/citation.cfm?id=314722\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.63.2427.pdf}, }
@article{319159, title = {Deciding when to forget in the Elephant file system}, author = {Santry, Douglas S. and Feeley, Michael J. and Hutchinson, Norman C. and Veitch, Alistair C. and Carton, Ross W. and Ofir, Jacob}, journal = {SIGOPS Oper. Syst. Rev}, volume = {33}, number = {5}, year = {1999}, address = {New York, NY, USA}, pages = {110--123}, publisher = {ACM}, abstract = {Modern file systems associate the deletion of a file with the immediate release of storage, and file writes with the irrevocable change of file contents. We argue that this behavior is a relic of the past, when disk storage was a scarce resource. Today, large cheap disks make it possible for the file system to protect valuable data from accidental delete or overwrite. This paper describes the design, implementation, and performance of the Elephant file system, which automatically retains all important versions of user files. Users name previous file versions by combining a traditional pathname with a time when the desired version of a file or directory existed. Storage in Elephant is managed by the system using filegrain user-specified retention policies. This approach contrasts with checkpointing file systems such as Plan-9, AFS, and WAFL that periodically generate efficient checkpoints of entire file systems and thus restrict retention to be guided by a single policy for all files within that file system. Elephant is implemented as a new Virtual File System in the FreeBSD kernel}, www_section = {file systems, storage}, issn = {0163-5980}, doi = {10.1145/319344.319159}, url = {http://portal.acm.org/citation.cfm?id=319159$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p110-santry.pdf}, }
% NOTE(review): @conference -> preferred alias @inproceedings; all other fields unchanged.
@inproceedings{335325, title = {The small-world phenomenon: an algorithm perspective}, author = {Kleinberg, Jon}, booktitle = {STOC '00: Proceedings of the thirty-second annual ACM symposium on Theory of computing}, organization = {ACM}, year = {2000}, address = {New York, NY, USA}, pages = {163--170}, publisher = {ACM}, abstract = {Long a matter of folklore, the {\textquotedblleft}small-world phenomenon {\textquotedblright} {\textemdash} the principle that we are all linked by short chains of acquaintances {\textemdash} was inaugurated as an area of experimental study in the social sciences through the pioneering work of Stanley Milgram in the 1960's. This work was among the first to make the phenomenon quantitative, allowing people to speak of the {\textquotedblleft}six degrees of separation {\textquotedblright} between any two people in the United States. Since then, a number of network models have been proposed as frameworks in which to study the problem analytically. One of the most refined of these models was formulated in recent work of Watts and Strogatz; their framework provided compelling evidence that the small-world phenomenon is pervasive in a range of networks arising in nature and technology, and a fundamental ingredient in the evolution of the World Wide Web. But existing models are insufficient to explain the striking algorithmic component of Milgram's original findings: that individuals using local information are collectively very effective at actually constructing short paths between two points in a social network. Although recently proposed network models are rich in short paths, we prove that no decentralized algorithm, operating with local information only, can construct short paths in these networks with non-negligible probability. 
We then define an infinite family of network models that naturally generalizes the Watts-Strogatz model, and show that for one of these models, there is a decentralized algorithm capable of finding short paths with high probability. More generally, we provide a strong characterization of this family of network models, showing that there is in fact a unique model within the family for which decentralized algorithms are effective}, www_section = {small-world}, isbn = {1-58113-184-4}, doi = {10.1145/335305.335325}, url = {http://portal.acm.org/citation.cfm?id=335325$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swn.pdf}, }
@article{335405, title = {XMill: an efficient compressor for XML data}, author = {Liefke, Hartmut and Suciu, Dan}, journal = {SIGMOD Rec}, volume = {29}, number = {2}, year = {2000}, address = {New York, NY, USA}, pages = {153--164}, publisher = {ACM}, abstract = {We describe a tool for compressing XML data, with applications in data exchange and archiving, which usually achieves about twice the compression ratio of gzip at roughly the same speed. The compressor, called XMill, incorporates and combines existing compressors in order to apply them to heterogeneous XML data: it uses zlib, the library function for gzip, a collection of datatype specific compressors for simple data types, and, possibly, user defined compressors for application specific data types}, www_section = {compression}, issn = {0163-5808}, doi = {10.1145/335191.335405}, url = {http://portal.acm.org/citation.cfm?id=335405$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.33.2632.pdf}, }
@article{338955, title = {Ant algorithms for discrete optimization}, author = {Dorigo, Marco and Di Caro, Gianni and Gambardella, Luca M.}, journal = {Artif. Life}, volume = {5}, number = {2}, year = {1999}, address = {Cambridge, MA, USA}, pages = {137--172}, publisher = {MIT Press}, abstract = {This article presents an overview of recent work on ant algorithms, that is, algorithms for discrete optimization that took inspiration from the observation of ant colonies' foraging behavior, and introduces the ant colony optimization (ACO) metaheuristic. In the first part of the article the basic biological findings on real ants are reviewed and their artificial counterparts as well as the ACO metaheuristic are defined. In the second part of the article a number of applications of ACO algorithms to combinatorial optimization and routing in communications networks are described. We conclude with a discussion of related work and of some of the most important aspects of the ACO metaheuristic}, www_section = {ant colony optimization, metaheuristics, natural computation, swarm intelligence}, issn = {1064-5462}, doi = {10.1162/106454699568728}, url = {http://portal.acm.org/citation.cfm?id=338955$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ij_23-alife99.pdf}, }
@conference{339337, title = {A case for end system multicast (keynote address)}, author = {Chu, Yang-hua and Rao, Sanjay G. and Zhang, Hui}, booktitle = {SIGMETRICS '00: Proceedings of the 2000 ACM SIGMETRICS international conference on Measurement and modeling of computer systems}, organization = {ACM}, year = {2000}, month = {June}, address = {Santa Clara, CA}, pages = {1--12}, publisher = {ACM}, abstract = {The conventional wisdom has been that IP is the natural protocol layer for implementing multicast related functionality. However, ten years after its initial proposal, IP Multicast is still plagued with concerns pertaining to scalability, network management, deployment and support for higher layer functionality such as error, flow and congestion control. In this paper, we explore an alternative architecture for small and sparse groups, where end systems implement all multicast related functionality including membership management and packet replication. We call such a scheme End System Multicast. This shifting of multicast support from routers to end systems has the potential to address most problems associated with IP Multicast. However, the key concern is the performance penalty associated with such a model. In particular, End System Multicast introduces duplicate packets on physical links and incurs larger end-to-end delay than IP Multicast. In this paper, we study this question in the context of the Narada protocol. In Narada, end systems self-organize into an overlay structure using a fully distributed protocol. In addition, Narada attempts to optimize the efficiency of the overlay based on end-to-end measurements. We present details of Narada and evaluate it using both simulation and Internet experiments. Preliminary results are encouraging. In most simulations and Internet experiments, the delay and bandwidth penalty are low. 
We believe the potential benefits of repartitioning multicast functionality between end systems and routers significantly outweigh the performance penalty incurred}, www_section = {multicast}, isbn = {1-58113-194-1}, doi = {10.1145/339331.339337}, url = {http://portal.acm.org/citation.cfm?id=339337$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac-2001.pdf}, }
@article{339345, title = {Feasibility of a serverless distributed file system deployed on an existing set of desktop PCs}, author = {Bolosky, William J. and John R. Douceur and Ely, David and Marvin Theimer}, journal = {SIGMETRICS Performance Evaluation Review}, volume = {28}, number = {1}, year = {2000}, address = {New York, NY, USA}, pages = {34--43}, publisher = {ACM}, abstract = {We consider an architecture for a serverless distributed file system that does not assume mutual trust among the client computers. The system provides security, availability, and reliability by distributing multiple encrypted replicas of each file among the client machines. To assess the feasibility of deploying this system on an existing desktop infrastructure, we measure and analyze a large set of client machines in a commercial environment. In particular, we measure and report results on disk usage and content; file activity; and machine uptimes, lifetimes, and loads. We conclude that the measured desktop infrastructure would passably support our proposed system, providing availability on the order of one unfilled file request per user per thousand days}, www_section = {analytical modeling, availability, feasibility analysis, personal computer usage data, reliability, serverless distributed file system architecture, trust, workload characterization}, issn = {0163-5999}, doi = {10.1145/345063.339345}, url = {http://portal.acm.org/citation.cfm?id=345063.339345$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.4280_0.pdf}, }
@article{357176, title = {The Byzantine Generals Problem}, author = {Lamport, Leslie and Shostak, Robert and Pease, Marshall}, journal = {ACM Trans. Program. Lang. Syst}, volume = {4}, number = {3}, year = {1982}, address = {New York, NY, USA}, pages = {382--401}, publisher = {ACM}, issn = {0164-0925}, doi = {10.1145/357172.357176}, url = {http://portal.acm.org/citation.cfm?id=357176$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/byz.pdf}, www_section = {Unsorted}, }
@article{368907, title = {On programming of arithmetic operations}, author = {Andrey Petrovych Ershov}, journal = {Commun. ACM}, volume = {1}, number = {8}, year = {1958}, address = {New York, NY, USA}, pages = {3--6}, publisher = {ACM}, issn = {0001-0782}, doi = {10.1145/368892.368907}, url = {http://portal.acm.org/citation.cfm?id=368907$\#$}, www_section = {Unsorted}, }
@article{37517, title = {A simple and efficient implementation of a small database}, author = {Andrew D. Birrell and Michael B. Jones and Edward P. Wobber}, journal = {SIGOPS Oper. Syst. Rev}, volume = {21}, number = {5}, year = {1987}, address = {New York, NY, USA}, pages = {149--154}, publisher = {ACM}, abstract = {This paper describes a technique for implementing the sort of small databases that frequently occur in the design of operating systems and distributed systems. We take advantage of the existence of very large virtual memories, and quite large real memories, to make the technique feasible. We maintain the database as a strongly typed data structure in virtual memory, record updates incrementally on disk in a log and occasionally make a checkpoint of the entire database. We recover from crashes by restoring the database from an old checkpoint then replaying the log. We use existing packages to convert between strongly typed data objects and their disk representations, and to communicate strongly typed data across the network (using remote procedure calls). Our memory is managed entirely by a general purpose allocator and garbage collector. This scheme has been used to implement a name server for a distributed system. The resulting implementation has the desirable property of being simultaneously simple, efficient and reliable}, issn = {0163-5980}, doi = {10.1145/37499.37517}, url = {http://portal.acm.org/citation.cfm?id=37499.37517$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/024-DatabasesPaper.pdf}, www_section = {Unsorted}, }
@conference{378347, title = {Bayeux: an architecture for scalable and fault-tolerant wide-area data dissemination}, author = {Shelley Zhuang and Ben Y. Zhao and Anthony D. Joseph and Katz, Randy H. and John Kubiatowicz}, booktitle = {NOSSDAV '01: Proceedings of the 11th international workshop on Network and operating systems support for digital audio and video}, organization = {ACM}, year = {2001}, address = {New York, NY, USA}, pages = {11--20}, publisher = {ACM}, abstract = {The demand for streaming multimedia applications is growing at an incredible rate. In this paper, we propose Bayeux, an efficient application-level multicast system that scales to arbitrarily large receiver groups while tolerating failures in routers and network links. Bayeux also includes specific mechanisms for load-balancing across replicate root nodes and more efficient bandwidth consumption. Our simulation results indicate that Bayeux maintains these properties while keeping transmission overhead low. To achieve these properties, Bayeux leverages the architecture of Tapestry, a fault-tolerant, wide-area overlay routing and location network}, www_section = {fault-tolerance, load balancing}, isbn = {1-58113-370-7}, doi = {10.1145/378344.378347}, url = {http://portal.acm.org/citation.cfm?id=378347$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bayeux.pdf}, }
@conference{379239, title = {OceanStore: an architecture for global-scale persistent storage}, author = {John Kubiatowicz and Bindel, David and Chen, Yan and Czerwinski, Steven and Eaton, Patrick and Geels, Dennis and Gummadi, Ramakrishna and Rhea, Sean C. and Weatherspoon, Hakim and Wells, Chris and Ben Y. Zhao}, booktitle = {ASPLOS-IX: Proceedings of the ninth international conference on Architectural support for programming languages and operating systems}, organization = {ACM}, year = {2000}, address = {New York, NY, USA}, pages = {190--201}, publisher = {ACM}, abstract = {OceanStore is a utility infrastructure designed to span the globe and provide continuous access to persistent information. Since this infrastructure is comprised of untrusted servers, data is protected through redundancy and cryptographic techniques. To improve performance, data is allowed to be cached anywhere, anytime. Additionally, monitoring of usage patterns allows adaptation to regional outages and denial of service attacks; monitoring also enhances performance through pro-active movement of data. A prototype implementation is currently under development}, isbn = {1-58113-317-0}, doi = {10.1145/378993.379239}, url = {http://doi.acm.org/10.1145/378993.379239}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p190-kubi.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{383072, title = {A scalable content-addressable network}, author = {Sylvia Paul Ratnasamy and Paul Francis and Handley, Mark and Richard Karp and S Shenker}, booktitle = {SIGCOMM '01: Proceedings of the 2001 conference on Applications, technologies, architectures, and protocols for computer communications}, organization = {ACM}, year = {2001}, address = {New York, NY, USA}, pages = {161--172}, publisher = {ACM}, abstract = {Hash tables--which map "keys" onto "values"--are an essential building block in modern software systems. We believe a similar functionality would be equally valuable to large distributed systems. In this paper, we introduce the concept of a Content-Addressable Network (CAN) as a distributed infrastructure that provides hash table-like functionality on Internet-like scales. The CAN is scalable, fault-tolerant and completely self-organizing, and we demonstrate its scalability, robustness and low-latency properties through simulation}, www_section = {CAN, fault-tolerance, robustness}, isbn = {1-58113-411-8}, doi = {10.1145/383059.383072}, url = {http://portal.acm.org/citation.cfm?id=383072$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.8434.pdf}, }
@article{4202, title = {RCS---a system for version control}, author = {Tichy, Walter F.}, journal = {Softw. Pract. Exper}, volume = {15}, number = {7}, year = {1985}, address = {New York, NY, USA}, pages = {637--654}, publisher = {John Wiley \& Sons, Inc}, abstract = {An important problem in program development and maintenance is version control, i.e., the task of keeping a software system consisting of many versions and configurations well organized. The Revision Control System (RCS) is a software tool that assists with that task. RCS manages revisions of text documents, in particular source programs, documentation, and test data. It automates the storing, retrieval, logging and identification of revisions, and it provides selection mechanisms for composing configurations. This paper introduces basic version control concepts and discusses the practice of version control using RCS. For conserving space, RCS stores deltas, i.e., differences between successive revisions. Several delta storage methods are discussed. Usage statistics show that RCS's delta storage method is space and time efficient. The paper concludes with a detailed survey of version control tools}, www_section = {version control}, issn = {0038-0644}, doi = {10.1002/spe.4380150703}, url = {http://portal.acm.org/citation.cfm?id=4202$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.3350_0.pdf}, }
@conference{501437, title = {The quest for security in mobile ad hoc networks}, author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun}, booktitle = {MobiHoc '01: Proceedings of the 2nd ACM international symposium on Mobile ad hoc networking \& computing}, organization = {ACM}, year = {2001}, address = {New York, NY, USA}, pages = {146--155}, publisher = {ACM}, abstract = {So far, research on mobile ad hoc networks has been focused primarily on routing issues. Security, on the other hand, has been given a lower priority. This paper provides an overview of security problems for mobile ad hoc networks, distinguishing the threats on basic mechanisms and on security mechanisms. It then describes our solution to protect the security mechanisms. The original features of this solution include that (i) it is fully decentralized and (ii) all nodes are assigned equivalent roles}, www_section = {ad-hoc networks, routing}, isbn = {1-58113-428-2}, doi = {10.1145/501436.501437}, url = {http://portal.acm.org/citation.cfm?id=501437$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Quest01.pdf}, }
@article{502048, title = {Resilient overlay networks}, author = {Andersen, David and Hari Balakrishnan and Frans M. Kaashoek and Robert Morris}, journal = {SIGOPS Oper. Syst. Rev}, volume = {35}, number = {5}, year = {2001}, address = {New York, NY, USA}, pages = {131--145}, publisher = {ACM}, abstract = {A Resilient Overlay Network (RON) is an architecture that allows distributed Internet applications to detect and recover from path outages and periods of degraded performance within several seconds, improving over today's wide-area routing protocols that take at least several minutes to recover. A RON is an application-layer overlay on top of the existing Internet routing substrate. The RON nodes monitor the functioning and quality of the Internet paths among themselves, and use this information to decide whether to route packets directly over the Internet or by way of other RON nodes, optimizing application-specific routing metrics.Results from two sets of measurements of a working RON deployed at sites scattered across the Internet demonstrate the benefits of our architecture. For instance, over a 64-hour sampling period in March 2001 across a twelve-node RON, there were 32 significant outages, each lasting over thirty minutes, over the 132 measured paths. RON's routing mechanism was able to detect, recover, and route around all of them, in less than twenty seconds on average, showing that its methods for fault detection and recovery work well at discovering alternate paths in the Internet. Furthermore, RON was able to improve the loss rate, latency, or throughput perceived by data transfers; for example, about 5\% of the transfers doubled their TCP throughput and 5\% of our transfers saw their loss probability reduced by 0.05. We found that forwarding packets via at most one intermediate RON node is sufficient to overcome faults and improve performance in most cases. 
These improvements, particularly in the area of fault detection and recovery, demonstrate the benefits of moving some of the control over routing into the hands of end-systems}, www_section = {resilient overlay network}, issn = {0163-5980}, doi = {10.1145/502059.502048}, url = {http://portal.acm.org/citation.cfm?id=502059.502048$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ron-sosp2001.pdf}, }
@conference{502052, title = {A low-bandwidth network file system}, author = {Muthitacharoen, Athicha and Chen, Benjie and David Mazi{\`e}res}, booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems principles}, organization = {ACM}, year = {2001}, address = {New York, NY, USA}, pages = {174--187}, publisher = {ACM}, abstract = {Users rarely consider running network file systems over slow or wide-area networks, as the performance would be unacceptable and the bandwidth consumption too high. Nonetheless, efficient remote file access would often be desirable over such networks---particularly when high latency makes remote login sessions unresponsive. Rather than run interactive programs such as editors remotely, users could run the programs locally and manipulate remote files through the file system. To do so, however, would require a network file system that consumes less bandwidth than most current file systems.This paper presents LBFS, a network file system designed for low-bandwidth networks. LBFS exploits similarities between files or versions of the same file to save bandwidth. It avoids sending data over the network when the same data can already be found in the server's file system or the client's cache. Using this technique in conjunction with conventional compression and caching, LBFS consumes over an order of magnitude less bandwidth than traditional network file systems on common workloads}, www_section = {file systems, workload characterization}, isbn = {1-58113-389-8}, doi = {10.1145/502034.502052}, url = {http://portal.acm.org/citation.cfm?id=502052$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lbfs.pdf}, }
@conference{502054, title = {Wide-area cooperative storage with CFS}, author = {Dabek, Frank and Frans M. Kaashoek and David Karger and Robert Morris and Ion Stoica}, booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems principles}, organization = {ACM}, year = {2001}, address = {New York, NY, USA}, pages = {202--215}, publisher = {ACM}, abstract = {The Cooperative File System (CFS) is a new peer-to-peer read-only storage system that provides provable guarantees for the efficiency, robustness, and load-balance of file storage and retrieval. CFS does this with a completely decentralized architecture that can scale to large systems. CFS servers provide a distributed hash table (DHash) for block storage. CFS clients interpret DHash blocks as a file system. DHash distributes and caches blocks at a fine granularity to achieve load balance, uses replication for robustness, and decreases latency with server selection. DHash finds blocks using the Chord location protocol, which operates in time logarithmic in the number of servers.CFS is implemented using the SFS file system toolkit and runs on Linux, OpenBSD, and FreeBSD. Experience on a globally deployed prototype shows that CFS delivers data to clients as fast as FTP. Controlled tests show that CFS is scalable: with 4,096 servers, looking up a block of data involves contacting only seven servers. The tests also demonstrate nearly perfect robustness and unimpaired performance even when as many as half the servers fail}, www_section = {P2P}, isbn = {1-58113-389-8}, doi = {10.1145/502034.502054}, url = {http://portal.acm.org/citation.cfm?id=502054$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cfs_sosp.pdf}, }
@conference{511496, title = {Choosing reputable servents in a P2P network}, author = {Cornelli, Fabrizio and Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and Pierangela Samarati}, booktitle = {WWW '02: Proceedings of the 11th international conference on World Wide Web}, organization = {ACM}, year = {2002}, address = {New York, NY, USA}, pages = {376--386}, publisher = {ACM}, www_section = {credibility, polling protocol, reputation}, isbn = {1-58113-449-5}, doi = {10.1145/511446.511496}, url = {http://portal.acm.org/citation.cfm?id=511496$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/samarati.pdf}, }
@conference{513828, title = {Performance analysis of the CONFIDANT protocol}, author = {Sonja Buchegger and Jean-Yves Le Boudec}, booktitle = {MobiHoc '02: Proceedings of the 3rd ACM international symposium on Mobile ad hoc networking \& computing}, organization = {ACM}, year = {2002}, address = {New York, NY, USA}, pages = {226--236}, publisher = {ACM}, abstract = {Mobile ad-hoc networking works properly only if the participating nodes cooperate in routing and forwarding. However,it may be advantageous for individual nodes not to cooperate. We propose a protocol, called CONFIDANT, for making misbehavior unattractive; it is based on selective altruism and utilitarianism. It aims at detecting and isolating misbehaving nodes, thus making it unattractive to deny cooperation. Trust relationships and routing decisions are based on experienced, observed, or reported routing and forwarding behavior of other nodes. The detailed implementation of CONFIDANT in this paper assumes that the network layer is based on the Dynamic Source Routing (DSR) protocol. We present a performance analysis of DSR fortified by CONFIDANT and compare it to regular defenseless DSR. It shows that a network with CONFIDANT and up to 60\% of misbehaving nodes behaves almost as well as a benign network, in sharp contrast to a defenseless network. All simulations have been implemented and performed in GloMoSim}, www_section = {cooperation, fairness, mobile Ad-hoc networks, reputation, robustness, routing, trust}, isbn = {1-58113-501-7}, doi = {10.1145/513800.513828}, url = {http://portal.acm.org/citation.cfm?id=513828$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BucheggerL02.pdf}, }
@conference{514164, title = {Enforcing service availability in mobile ad-hoc WANs}, author = {Levente Butty{\'a}n and Jean-Pierre Hubaux}, booktitle = {MobiHoc '00: Proceedings of the 1st ACM international symposium on Mobile ad hoc networking \& computing}, organization = {IEEE Press}, year = {2000}, address = {Piscataway, NJ, USA}, pages = {87--96}, publisher = {IEEE Press}, abstract = {In this paper, we address the problem of service availability in mobile ad-hoc WANs. We present a secure mechanism to stimulate end users to keep their devices turned on, to refrain from overloading the network, and to thwart tampering aimed at converting the device into a "selfish" one. Our solution is based on the application of a tamper resistant security module in each device and cryptographic protection of messages}, www_section = {ad-hoc networks, cryptography}, isbn = {0-7803-6534-8}, url = {http://portal.acm.org/citation.cfm?id=514164}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.84.5715.pdf}, }
@conference{5328076, title = {Performance Evaluation of On-Demand Multipath Distance Vector Routing Protocol under Different Traffic Models}, author = {Malarkodi, B. and Rakesh, P. and Venkataramani, B.}, booktitle = {International Conference on Advances in Recent Technologies in Communication and Computing, 2009. ARTCom '09}, year = {2009}, month = {October}, pages = {77--80}, abstract = {Traffic models are the heart of any performance evaluation of telecommunication networks. Understanding the nature of traffic in high speed, high bandwidth communication system is essential for effective operation and performance evaluation of the networks. Many routing protocols reported in the literature for Mobile ad hoc networks(MANETS) have been primarily designed and analyzed under the assumption of CBR traffic models, which is unable to capture the statistical characteristics of the actual traffic. It is necessary to evaluate the performance properties of MANETs in the context of more realistic traffic models. In an effort towards this end, this paper evaluates the performance of adhoc on demand multipath distance vector (AOMDV) routing protocol in the presence of poisson and bursty self similar traffic and compares them with that of CBR traffic. Different metrics are considered in analyzing the performance of routing protocol including packet delivery ratio, throughput and end to end delay. Our simulation results indicate that the packet delivery fraction and throughput in AOMDV is increased in the presence of self similar traffic compared to other traffic. Moreover, it is observed that the end to end delay in the presence of self similar traffic is lesser than that of CBR and higher than that of poisson traffic}, www_section = {ad-hoc networks, AOMDV, distance vector, multi-path, performance}, doi = {10.1109/ARTCom.2009.31}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/multipath-dv-perf.pdf}, }
@book{538134, title = {Capability-Based Computer Systems}, author = {Levy, Henry M.}, organization = {Butterworth-Heinemann}, year = {1984}, address = {Newton, MA, USA}, publisher = {Butterworth-Heinemann}, isbn = {0932376223}, url = {http://portal.acm.org/citation.cfm?id=538134$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Preface.pdf}, www_section = {Unsorted}, }
@book{558412, title = {Peer-to-Peer: Harnessing the Power of Disruptive Technologies}, author = {Oram, Andy}, organization = {O'Reilly \& Associates, Inc}, year = {2001}, address = {Sebastopol, CA, USA}, editor = {Oram, Andy}, publisher = {O'Reilly \& Associates, Inc}, abstract = {Upstart software projects Napster, Gnutella, and Freenet have dominated newspaper headlines, challenging traditional approaches to content distribution with their revolutionary use of peer-to-peer file-sharing technologies. Reporters try to sort out the ramifications of seemingly ungoverned peer-to-peer networks. Lawyers, business leaders, and social commentators debate the virtues and evils of these bold new distributed systems. But what's really behind such disruptive technologies -- the breakthrough innovations that have rocked the music and media worlds? And what lies ahead? In this book, key peer-to-peer pioneers take us beyond the headlines and hype and show how the technology is changing the way we communicate and exchange information. Those working to advance peer-to-peer as a technology, a business opportunity, and an investment offer their insights into how the technology has evolved and where it's going. They explore the problems they've faced, the solutions they've discovered, the lessons they've learned, and their goals for the future of computer networking. Until now, Internet communities have been limited by the flat interactive qualities of email and network newsgroups, where people can exchange recommendations and ideas but have great difficulty commenting on one another's postings, structuring information, performing searches, and creating summaries. Peer-to-peer challenges the traditional authority of the client/server model, allowing shared information to reside instead with producers and users. Peer-to-peer networks empower users to collaborate on producing and consuming information, adding to it, commenting on it, and building communities around it. 
This compilation represents the collected wisdom of today's peer-to-peer luminaries. It includes contributions from Gnutella's Gene Kan, Freenet's Brandon Wiley, Jabber's Jeremie Miller, and many others -- plus serious discussions of topics ranging from accountability and trust to security and performance. Fraught with questions and promise, peer-to-peer is sure to remain on the computer industry's center stage for years to come}, isbn = {059600110X}, url = {http://portal.acm.org/citation.cfm?id=558412$\#$}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{567178, title = {A State-of-the-Art Survey on Software Merging}, author = {Mens, Tom}, journal = {IEEE Trans. Softw. Eng}, volume = {28}, number = {5}, year = {2002}, address = {Piscataway, NJ, USA}, pages = {449--462}, publisher = {IEEE Press}, abstract = {Software merging is an essential aspect of the maintenance and evolution of large-scale software systems. This paper provides a comprehensive survey and analysis of available merge approaches. Over the years, a wide variety of different merge techniques has been proposed. While initial techniques were purely based on textual merging, more powerful approaches also take the syntax and semantics of the software into account. There is a tendency towards operation-based merging because of its increased expressiveness. Another tendency is to try to define merge techniques that are as general, accurate, scalable, and customizable as possible, so that they can be used in any phase in the software life-cycle and detect as many conflicts as possible. After comparing the possible merge techniques, we suggest a number of important open problems and future research directions}, www_section = {conflict detection, large-scale software development, merge conflicts, software merging}, issn = {0098-5589}, doi = {10.1109/TSE.2002.1000449}, url = {http://portal.acm.org/citation.cfm?id=567178$\#$}, }
@article{568525, title = {A survey of rollback-recovery protocols in message-passing systems}, author = {Mootaz Elnozahy and Lorenzo Alvisi and Yi-Min Wang and Johnson, David B.}, journal = {ACM Comput. Surv}, volume = {34}, number = {3}, year = {2002}, address = {New York, NY, USA}, pages = {375--408}, publisher = {ACM}, abstract = {This survey covers rollback-recovery techniques that do not require special language constructs. In the first part of the survey we classify rollback-recovery protocols into checkpoint-based and log-based. Checkpoint-based protocols rely solely on checkpointing for system state restoration. Checkpointing can be coordinated, uncoordinated, or communication-induced. Log-based protocols combine checkpointing with logging of nondeterministic events, encoded in tuples called determinants. Depending on how determinants are logged, log-based protocols can be pessimistic, optimistic, or causal. Throughout the survey, we highlight the research issues that are at the core of rollback-recovery and present the solutions that currently address them. We also compare the performance of different rollback-recovery protocols with respect to a series of desirable properties and discuss the issues that arise in the practical implementations of these protocols}, www_section = {message logging, rollback-recovery}, issn = {0360-0300}, doi = {10.1145/568522.568525}, url = {http://portal.acm.org/citation.cfm?id=568522.568525$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CMU-CS-99-148.pdf}, }
@article{571638, title = {COCA: A secure distributed online certification authority}, author = {Zhou, Lidong and Schneider, Fred B. and Robbert Van Renesse}, journal = {ACM Trans. Comput. Syst}, volume = {20}, number = {4}, year = {2002}, address = {New York, NY, USA}, pages = {329--368}, publisher = {ACM}, abstract = {COCA is a fault-tolerant and secure online certification authority that has been built and deployed both in a local area network and in the Internet. Extremely weak assumptions characterize environments in which COCA's protocols execute correctly: no assumption is made about execution speed and message delivery delays; channels are expected to exhibit only intermittent reliability; and with 3t + 1 COCA servers up to t may be faulty or compromised. COCA is the first system to integrate a Byzantine quorum system (used to achieve availability) with proactive recovery (used to defend against mobile adversaries which attack, compromise, and control one replica for a limited period of time before moving on to another). In addition to tackling problems associated with combining fault-tolerance and security, new proactive recovery protocols had to be developed. Experimental results give a quantitative evaluation for the cost and effectiveness of the protocols}, www_section = {byzantine fault tolerance, certification authority, denial-of-service, proactive secret-sharing, public key cryptography, threshold cryptography}, issn = {0734-2071}, doi = {10.1145/571637.571638}, url = {http://portal.acm.org/citation.cfm?id=571638$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cocaTOCS.pdf}, }
@conference{571857, title = {Viceroy: a scalable and dynamic emulation of the butterfly}, author = {Malkhi, Dahlia and Moni Naor and Ratajczak, David}, booktitle = {PODC '02: Proceedings of the twenty-first annual symposium on Principles of distributed computing}, organization = {ACM}, year = {2002}, address = {New York, NY, USA}, pages = {183--192}, publisher = {ACM}, abstract = {We propose a family of constant-degree routing networks of logarithmic diameter, with the additional property that the addition or removal of a node to the network requires no global coordination, only a constant number of linkage changes in expectation, and a logarithmic number with high probability. Our randomized construction improves upon existing solutions, such as balanced search trees, by ensuring that the congestion of the network is always within a logarithmic factor of the optimum with high probability. Our construction derives from recent advances in the study of peer-to-peer lookup networks, where rapid changes require efficient and distributed maintenance, and where the lookup efficiency is impacted both by the lengths of paths to requested data and the presence or elimination of bottlenecks in the network}, www_section = {P2P}, isbn = {1-58113-485-1}, doi = {10.1145/571825.571857}, url = {http://portal.acm.org/citation.cfm?id=571857$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/viceroy.pdf}, }
@article{581193,
  title = {Modelling with Generalized Stochastic Petri Nets},
  author = {Ajmone Marsan, Marco and Balbo, Gianfranco and Conte, Gianni and Donatelli, Susanna and Franceschinis, Giuliana},
  journal = {SIGMETRICS Perform. Eval. Rev},
  volume = {26},
  number = {2},
  year = {1998},
  address = {New York, NY, USA},
  pages = {0--2},
  publisher = {ACM},
  issn = {0163-5999},
  doi = {10.1145/288197.581193},
  url = {http://portal.acm.org/citation.cfm?id=581193$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.6433.pdf},
  www_section = {Unsorted},
}
@inproceedings{586136,
  title = {Query-flood DoS attacks in gnutella},
  author = {Daswani, Neil and Garcia-Molina, Hector},
  booktitle = {CCS '02: Proceedings of the 9th ACM conference on Computer and communications security},
  organization = {ACM},
  year = {2002},
  address = {New York, NY, USA},
  pages = {181--192},
  publisher = {ACM},
  abstract = {We describe a simple but effective traffic model that can be used to understand the effects of denial-of-service (DoS) attacks based on query floods in Gnutella networks. We run simulations based on the model to analyze how different choices of network topology and application level load balancing policies can minimize the effect of these types of DoS attacks. In addition, we also study how damage caused by query floods is distributed throughout the network, and how application-level policies can localize the damage},
  www_section = {denial-of-service, P2P},
  isbn = {1-58113-612-9},
  doi = {10.1145/586110.586136},
  url = {http://portal.acm.org/citation.cfm?id=586110.586136$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p115-daswani_0.pdf},
}
@article{605408,
  title = {Energy-efficient computing for wildlife tracking: design tradeoffs and early experiences with ZebraNet},
  author = {Juang, Philo and Oki, Hidekazu and Wang, Yong and Martonosi, Margaret and Peh, Li Shiuan and Rubenstein, Daniel},
  journal = {SIGARCH Comput. Archit. News},
  volume = {30},
  number = {5},
  year = {2002},
  address = {New York, NY, USA},
  pages = {96--107},
  publisher = {ACM},
  abstract = {Over the past decade, mobile computing and wireless communication have become increasingly important drivers of many new computing applications. The field of wireless sensor networks particularly focuses on applications involving autonomous use of compute, sensing, and wireless communication devices for both scientific and commercial purposes. This paper examines the research decisions and design tradeoffs that arise when applying wireless peer-to-peer networking techniques in a mobile sensor network designed to support wildlife tracking for biology research. The ZebraNet system includes custom tracking collars (nodes) carried by animals under study across a large, wild area; the collars operate as a peer-to-peer network to deliver logged data back to researchers. The collars include global positioning system (GPS), Flash memory, wireless transceivers, and a small CPU; essentially each node is a small, wireless computing device. Since there is no cellular service or broadcast communication covering the region where animals are studied, ad hoc, peer-to-peer routing is needed. Although numerous ad hoc protocols exist, additional challenges arise because the researchers themselves are mobile and thus there is no fixed base station towards which to aim data. Overall, our goal is to use the least energy, storage, and other resources necessary to maintain a reliable system with a very high {\textquoteleft}data homing' success rate. We plan to deploy a 30-node ZebraNet system at the Mpala Research Centre in central Kenya. More broadly, we believe that the domain-centric protocols and energy tradeoffs presented here for ZebraNet will have general applicability in other wireless and sensor applications},
  issn = {0163-5964},
  doi = {10.1145/635506.605408},
  url = {http://portal.acm.org/citation.cfm?id=635506.605408$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/asplos-x_annot.pdf},
  www_section = {Unsorted},
}
@inproceedings{627372,
  title = {SURF-2: A program for dependability evaluation of complex hardware and software systems},
  author = {C. Beounes and M. Aguera and J. Arlat and S. Bachmann and C. Bourdeau and J.-E. Doucet and K. Kanoun and J.-C. Laprie and S. Metge and J. Moreira de Souza and D. Powell and P. Spiesser},
  booktitle = {Proceedings of FTCS-23 The Twenty-Third International Symposium on Fault-Tolerant Computing},
  year = {1993},
  month = jun,
  pages = {668--673},
  abstract = {SURF-2, a software tool for evaluating system dependability, is described. It is especially designed for an evaluation-based system design approach in which multiple design solutions need to be compared from the dependability viewpoint. System behavior may be modeled either by Markov chains or by generalized stochastic Petri nets. The tool supports the evaluation of different measures of dependability, including pointwise measures, asymptotic measures, mean sojourn times and, by superposing a reward structure on the behavior model, reward measures such as expected performance or cost},
  www_section = {software reliability, system behaviour, SURF-2, dependability evaluation, complex hardware and software systems, software tool, system dependability, evaluation-based system design approach, multiple design solutions, Markov chains, generalized stochastic Petri nets, measures of dependability, pointwise measures, asymptotic measures, mean sojourn times, reward structure, reward measures, performance, Hardware, Software systems, Stochastic systems, Petri nets, Software tools, Process design, Stochastic processes, Humans, Costs, Performance evaluation},
  doi = {10.1109/FTCS.1993.627372},
  issn = {0731-3071},
  isbn = {0-8186-3680-7},
  url = {https://ieeexplore.ieee.org/document/627372/authors#authors},
}
@inproceedings{633027,
  title = {Understanding BGP misconfiguration},
  author = {Mahajan, Ratul and Wetherall, David and Anderson, Thomas},
  booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications, technologies, architectures, and protocols for computer communications},
  organization = {ACM},
  year = {2002},
  address = {New York, NY, USA},
  pages = {3--16},
  publisher = {ACM},
  abstract = {It is well-known that simple, accidental BGP configuration errors can disrupt Internet connectivity. Yet little is known about the frequency of misconfiguration or its causes, except for the few spectacular incidents of widespread outages. In this paper, we present the first quantitative study of BGP misconfiguration. Over a three week period, we analyzed routing table advertisements from 23 vantage points across the Internet backbone to detect incidents of misconfiguration. For each incident we polled the ISP operators involved to verify whether it was a misconfiguration, and to learn the cause of the incident. We also actively probed the Internet to determine the impact of misconfiguration on connectivity. Surprisingly, we find that configuration errors are pervasive, with 200-1200 prefixes (0.2-1.0\% of the BGP table size) suffering from misconfiguration each day. Close to 3 in 4 of all new prefix advertisements were results of misconfiguration. Fortunately, the connectivity seen by end users is surprisingly robust to misconfigurations. While misconfigurations can substantially increase the update load on routers, only one in twenty five affects connectivity. While the causes of misconfiguration are diverse, we argue that most could be prevented through better router design},
  www_section = {border gateway protocol},
  isbn = {1-58113-570-X},
  doi = {10.1145/633025.633027},
  url = {http://portal.acm.org/citation.cfm?id=633027$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bgpmisconfig.pdf},
}
@inproceedings{633045,
  title = {Scalable application layer multicast},
  author = {Banerjee, Suman and Bhattacharjee, Bobby and Kommareddy, Christopher},
  booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications, technologies, architectures, and protocols for computer communications},
  organization = {ACM},
  year = {2002},
  address = {New York, NY, USA},
  pages = {205--217},
  publisher = {ACM},
  abstract = {We describe a new scalable application-layer multicast protocol, specifically designed for low-bandwidth, data streaming applications with large receiver sets. Our scheme is based upon a hierarchical clustering of the application-layer multicast peers and can support a number of different data delivery trees with desirable properties. We present extensive simulations of both our protocol and the Narada application-layer multicast protocol over Internet-like topologies. Our results show that for groups of size 32 or more, our protocol has lower link stress (by about 25\%), improved or similar end-to-end latencies and similar failure recovery properties. More importantly, it is able to achieve these results by using orders of magnitude lower control traffic. Finally, we present results from our wide-area testbed in which we experimented with 32-100 member groups distributed over 8 different sites. In our experiments, average group members established and maintained low-latency paths and incurred a maximum packet loss rate of less than 1\% as members randomly joined and left the multicast group. The average control overhead during our experiments was less than 1 Kbps for groups of size 100},
  www_section = {application layer multicast, hierarchy, overlay networks, P2P, scalability},
  isbn = {1-58113-570-X},
  doi = {10.1145/633025.633045},
  url = {http://portal.acm.org/citation.cfm?id=633045$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sigcomm02.pdf},
}
@inproceedings{642636,
  title = {Usability and privacy: a study of Kazaa P2P file-sharing},
  author = {Good, Nathaniel S. and Krekelberg, Aaron},
  booktitle = {CHI '03: Proceedings of the SIGCHI conference on Human factors in computing systems},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {137--144},
  publisher = {ACM},
  abstract = {P2P file sharing systems such as Gnutella, Freenet, and KaZaA, while primarily intended for sharing multimedia files, frequently allow other types of information to be shared. This raises serious concerns about the extent to which users may unknowingly be sharing private or personal information. In this paper, we report on a cognitive walkthrough and a laboratory user study of the KaZaA file sharing user interface. The majority of the users in our study were unable to tell what files they were sharing, and sometimes incorrectly assumed they were not sharing any files when in fact they were sharing all files on their hard drive. An analysis of the KaZaA network suggested that a large number of users appeared to be unwittingly sharing personal and private files, and that some users were indeed taking advantage of this and downloading files containing ostensibly private information},
  www_section = {file-sharing, P2P},
  isbn = {1-58113-630-7},
  doi = {10.1145/642611.642636},
  url = {http://portal.acm.org/citation.cfm?id=642611.642636$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPL-2002-163.pdf},
}
@proceedings{646334,
  title = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  year = {2002},
  address = {London, UK},
  editor = {Peter Druschel and Frans M. Kaashoek and Antony Rowstron},
  publisher = {Springer-Verlag},
  isbn = {3-540-44179-4},
  url = {http://portal.acm.org/citation.cfm?id=646334$\#$},
  www_section = {Unsorted},
}
@inproceedings{651321,
  title = {Venti: A New Approach to Archival Storage},
  author = {Quinlan, Sean and Dorward, Sean},
  booktitle = {FAST '02: Proceedings of the Conference on File and Storage Technologies},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {89--101},
  publisher = {USENIX Association},
  abstract = {This paper describes a network storage system, called Venti, intended for archival data. In this system, a unique hash of a block's contents acts as the block identifier for read and write operations. This approach enforces a write-once policy, preventing accidental or malicious destruction of data. In addition, duplicate copies of a block can be coalesced, reducing the consumption of storage and simplifying the implementation of clients. Venti is a building block for constructing a variety of storage applications such as logical backup, physical backup, and snapshot file systems},
  www_section = {backup, file systems, network storage},
  isbn = {1-880446-03-0},
  url = {http://portal.acm.org/citation.cfm?id=651321$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/venti-fast.pdf},
}
@inproceedings{664025,
  title = {AMnet 2.0: An Improved Architecture for Programmable Networks},
  author = {Thomas Fuhrmann and Till Harbaum and Marcus Schoeller and Martina Zitterbart},
  booktitle = {IWAN '02: Proceedings of the IFIP-TC6 4th International Working Conference on Active Networks},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {162--176},
  publisher = {Springer-Verlag},
  abstract = {AMnet 2.0 is an improved architecture for programmable networks that is based on the experiences from the previous implementation of AMnet. This paper gives an overview of the AMnet architecture and Linux-based implementation of this software router. It also discusses the differences to the previous version of AMnet. AMnet 2.0 complements application services with net-centric services in an integrated system that provides the fundamental building blocks both for an active node itself and the operation of a larger set of nodes, including code deployment decisions, service relocation, resource management},
  www_section = {programmable networks},
  isbn = {3-540-00223-5},
  doi = {10.1007/3-540-36199-5},
  url = {http://portal.acm.org/citation.cfm?id=664025$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann02architecture_0.pdf},
}
@inproceedings{672869,
  title = {Balanced Distributed Search Trees Do Not Exist},
  author = {Kr{\"o}ll, Brigitte and Widmayer, Peter},
  booktitle = {WADS '95: Proceedings of the 4th International Workshop on Algorithms and Data Structures},
  organization = {Springer-Verlag},
  year = {1995},
  address = {London, UK},
  pages = {50--61},
  publisher = {Springer-Verlag},
  abstract = {This paper is a first step towards an understanding of the inherent limitations of distributed data structures. We propose a model of distributed search trees that is based on few natural assumptions. We prove that any class of trees within our model satisfies a lower bound of {$\Omega(\sqrt{m})$} on the worst case height of distributed search trees for m keys. That is, unlike in the single site case, balance in the sense that the tree height satisfies a logarithmic upper bound cannot be achieved. This is true although each node is allowed to have arbitrary degree (note that in this case, the height of a single site search tree is trivially bounded by one). By proposing a method that generates trees of height {$O(\sqrt{m})$}, we show the bound to be tight. Distributed data structures have attracted considerable attention in the past few years. From a practical viewpoint, this is due to the increasing availability of networks of workstations},
  isbn = {3-540-60220-8},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4081},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.34.4081.pdf},
  www_section = {Unsorted},
}
@inproceedings{687810,
  title = {Security Considerations for Peer-to-Peer Distributed Hash Tables},
  author = {Emil Sit and Robert Morris},
  booktitle = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {261--269},
  publisher = {Springer-Verlag},
  abstract = {Recent peer-to-peer research has focused on providing efficient hash lookup systems that can be used to build more complex systems. These systems have good properties when their algorithms are executed correctly but have not generally considered how to handle misbehaving nodes. This paper looks at what sorts of security problems are inherent in large peer-to-peer systems based on distributed hash lookup systems. We examine the types of problems that such systems might face, drawing examples from existing systems, and propose some design principles for detecting and preventing these problems},
  www_section = {distributed hash table, P2P},
  isbn = {3-540-44179-4},
  url = {http://portal.acm.org/citation.cfm?id=687810$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/173.pdf},
}
@inproceedings{687814,
  title = {Erasure Coding Vs. Replication: A Quantitative Comparison},
  author = {Weatherspoon, Hakim and Kubiatowicz, John},
  booktitle = {IPTPS '01: Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {328--338},
  publisher = {Springer-Verlag},
  abstract = {Peer-to-peer systems are positioned to take advantage of gains in network bandwidth, storage capacity, and computational resources to provide long-term durable storage infrastructures. In this paper, we quantitatively compare building a distributed storage infrastructure that is self-repairing and resilient to faults using either a replicated system or an erasure-resilient system. We show that systems employing erasure codes have mean time to failures many orders of magnitude higher than replicated systems with similar storage and bandwidth requirements. More importantly, erasure-resilient systems use an order of magnitude less bandwidth and storage to provide similar system durability as replicated systems},
  www_section = {distributed storage, erasure coding, P2P},
  isbn = {3-540-44179-4},
  doi = {10.1007/3-540-45748-8},
  url = {http://www.springerlink.com/content/e1kmcf729e6updgm/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Erasure\%20coding\%20vs.\%20replication.pdf},
}
@inproceedings{697650,
  title = {Pastry: Scalable, Decentralized Object Location, and Routing for Large-Scale Peer-to-Peer Systems},
  author = {Antony Rowstron and Peter Druschel},
  booktitle = {Middleware '01: Proceedings of the IFIP/ACM International Conference on Distributed Systems Platforms Heidelberg},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {329--350},
  publisher = {Springer-Verlag},
  abstract = {This paper presents the design and evaluation of Pastry, a scalable, distributed object location and routing substrate for wide-area peer-to-peer applications. Pastry performs application-level routing and object location in a potentially very large overlay network of nodes connected via the Internet. It can be used to support a variety of peer-to-peer applications, including global data storage, data sharing, group communication and naming. Each node in the Pastry network has a unique identifier (nodeId). When presented with a message and a key, a Pastry node efficiently routes the message to the node with a nodeId that is numerically closest to the key, among all currently live Pastry nodes. Each Pastry node keeps track of its immediate neighbors in the nodeId space, and notifies applications of new node arrivals, node failures and recoveries. Pastry takes into account network locality; it seeks to minimize the distance messages travel, according to a scalar proximity metric like the number of IP routing hops. Pastry is completely decentralized, scalable, and self-organizing; it automatically adapts to the arrival, departure and failure of nodes. Experimental results obtained with a prototype implementation on an emulated network of up to 100,000 nodes confirm Pastry's scalability and efficiency, its ability to self-organize and adapt to node failures, and its good network locality properties},
  www_section = {overlay networks, P2P},
  isbn = {3-540-42800-3},
  url = {http://portal.acm.org/citation.cfm?id=697650$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pastry.pdf},
}
@inproceedings{713855,
  title = {Design Evolution of the EROS Single-Level Store},
  author = {Shapiro, Jonathan S. and Adams, Jonathan},
  booktitle = {ATEC '02: Proceedings of the General Track of the annual conference on USENIX Annual Technical Conference},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {59--72},
  publisher = {USENIX Association},
  abstract = {File systems have (at least) two undesirable characteristics: both the addressing model and the consistency semantics differ from those of memory, leading to a change in programming model at the storage boundary. Main memory is a single flat space of pages with a simple durability (persistence) model: all or nothing. File content durability is a complex function of implementation, caching, and timing. Memory is globally consistent. File systems offer no global consistency model. Following a crash recovery, individual files may be lost or damaged, or may be collectively inconsistent even though they are individually sound},
  www_section = {file systems},
  isbn = {1-880446-00-6},
  url = {http://portal.acm.org/citation.cfm?id=713855$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/storedesign2002.pdf},
}
@inproceedings{714768,
  title = {Aspects of AMnet Signaling},
  author = {Speer, Anke and Schoeller, Marcus and Fuhrmann, Thomas and Zitterbart, Martina},
  booktitle = {NETWORKING '02: Proceedings of the Second International IFIP-TC6 Networking Conference on Networking Technologies, Services, and Protocols; Performance of Computer and Communication Networks; and Mobile and Wireless Communications},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {1214--1220},
  publisher = {Springer-Verlag},
  abstract = {AMnet provides a framework for flexible and rapid service creation. It is based on Programmable Networking technologies and uses active nodes (AMnodes) within the network for the provision of individual, application-specific services. To this end, these AMnodes execute service modules that are loadable on-demand and enhance the functionality of intermediate systems without the need of long global standardization processes. Placing application-dedicated functionality within the network requires a flexible signaling protocol to discover and announce as well as to establish and maintain the corresponding services. AMnet Signaling was developed for this purpose and will be presented in detail within this paper},
  www_section = {multicast, programmable networks},
  isbn = {3-540-43709-6},
  doi = {10.1007/3-540-47906-6},
  url = {http://www.springerlink.com/content/4j371710765jg14q/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/speer02networking.pdf},
}
@inproceedings{715916,
  title = {CPCMS: A Configuration Management System Based on Cryptographic Names},
  author = {Shapiro, Jonathan S. and Vanderburgh, John},
  booktitle = {Proceedings of the FREENIX Track: 2002 USENIX Annual Technical Conference},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {207--220},
  publisher = {USENIX Association},
  abstract = {CPCMS, the Cryptographically Protected Configuration Management System is a new configuration management system that provides scalability, disconnected commits, and fine-grain access controls. It addresses the novel problems raised by modern open-source development practices, in which projects routinely span traditional organizational boundaries and can involve thousands of participants. CPCMS provides for simultaneous public and private lines of development, with post hoc "publication" of private branches},
  isbn = {1-880446-01-4},
  url = {http://portal.acm.org/citation.cfm?id=715916$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.3184.pdf},
  www_section = {Unsorted},
}
@inproceedings{716407,
  title = {An Empirical Study of Delta Algorithms},
  author = {Hunt, James J. and Vo, Kiem-Phong and Tichy, Walter F.},
  booktitle = {ICSE '96: Proceedings of the SCM-6 Workshop on System Configuration Management},
  organization = {Springer-Verlag},
  year = {1996},
  address = {London, UK},
  pages = {49--66},
  publisher = {Springer-Verlag},
  abstract = {Delta algorithms compress data by encoding one file in terms of another. This type of compression is useful in a number of situations: storing multiple versions of data, distributing updates, storing backups, transmitting video sequences, and others. This paper studies the performance parameters of several delta algorithms, using a benchmark of over 1300 pairs of files taken from two successive releases of GNU software. Results indicate that modern delta compression algorithms based on Ziv-Lempel techniques significantly outperform diff, a popular but older delta compressor, in terms of compression ratio. The modern compressors also correlate better with the actual difference between files; one of them is even faster than diff in both compression and decompression speed},
  isbn = {3-540-61964-X},
  doi = {10.1007/BFb0023076},
  url = {http://www.springerlink.com/content/584k258285p18x4g/},
  www_section = {Unsorted},
}
@inproceedings{747489,
  title = {Extremum Feedback for Very Large Multicast Groups},
  author = {J{\"o}rg Widmer and Thomas Fuhrmann},
  booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on Networked Group Communication},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {56--75},
  publisher = {Springer-Verlag},
  abstract = {In multicast communication, it is often required that feedback is received from a potentially very large group of responders while at the same time a feedback implosion needs to be prevented. To this end, a number of feedback control mechanisms have been proposed, which rely either on tree-based feedback aggregation or timer-based feedback suppression. Usually, these mechanisms assume that it is not necessary to discriminate between feedback from different receivers. However, for many applications this is not the case and feedback from receivers with certain response values is preferred (e.g., highest loss or largest delay)},
  www_section = {multicast},
  isbn = {3-540-42824-0},
  url = {http://portal.acm.org/citation.cfm?id=648089.747489$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Widmer2001g.pdf},
}
@inproceedings{747491,
  title = {Application-Level Multicast Using Content-Addressable Networks},
  author = {Ratnasamy, Sylvia and Handley, Mark and Karp, Richard and Shenker, Scott},
  booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on Networked Group Communication},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {14--29},
  publisher = {Springer-Verlag},
  abstract = {Most currently proposed solutions to application-level multicast organise the group members into an application-level mesh over which a Distance-Vector routing protocol, or a similar algorithm, is used to construct source-rooted distribution trees. The use of a global routing protocol limits the scalability of these systems. Other proposed solutions that scale to larger numbers of receivers do so by restricting the multicast service model to be single-sourced. In this paper, we propose an application-level multicast scheme capable of scaling to large group sizes without restricting the service model to a single source. Our scheme builds on recent work on Content-Addressable Networks (CANs). Extending the CAN framework to support multicast comes at trivial additional cost and, because of the structured nature of CAN topologies, obviates the need for a multicast routing algorithm. Given the deployment of a distributed infrastructure such as a CAN, we believe our CAN-based multicast scheme offers the dual advantages of simplicity and scalability},
  www_section = {CAN, mesh networks},
  isbn = {3-540-42824-0},
  doi = {10.1007/3-540-45546-9},
  url = {http://www.springerlink.com/content/ahdgfj8yj9exqe03/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can-mcast.pdf},
}
@inproceedings{758535,
  title = {New Sequences of Linear Time Erasure Codes Approaching the Channel Capacity},
  author = {M. Amin Shokrollahi},
  booktitle = {AAECC-13: Proceedings of the 13th International Symposium on Applied Algebra, Algebraic Algorithms and Error-Correcting Codes},
  organization = {Springer-Verlag},
  year = {1999},
  address = {London, UK},
  pages = {65--76},
  publisher = {Springer-Verlag},
  abstract = {We will introduce a new class of erasure codes built from irregular bipartite graphs that have linear time encoding and decoding algorithms and can transmit over an erasure channel at rates arbitrarily close to the channel capacity. We also show that these codes are close to optimal with respect to the trade-off between the proximity to the channel capacity and the running time of the recovery algorithm},
  www_section = {coding theory, irregular bipartite graphs, recovery algorithm},
  isbn = {3-540-66723-7},
  url = {http://portal.acm.org/citation.cfm?id=758535$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/new_sequences_of_linear_time_erasure_cod_64778.pdf},
}
@article{766661,
  title = {Self-Organized Public-Key Management for Mobile Ad Hoc Networks},
  author = {Capkun, Srdjan and Butty{\'a}n, Levente and Hubaux, Jean-Pierre},
  journal = {IEEE Transactions on Mobile Computing},
  volume = {2},
  number = {1},
  year = {2003},
  address = {Piscataway, NJ, USA},
  pages = {52--64},
  publisher = {IEEE Educational Activities Department},
  abstract = {In contrast with conventional networks, mobile ad hoc networks usually do not provide online access to trusted authorities or to centralized servers, and they exhibit frequent partitioning due to link and node failures and to node mobility. For these reasons, traditional security solutions that require online trusted authorities or certificate repositories are not well-suited for securing ad hoc networks. In this paper, we propose a fully self-organized public-key management system that allows users to generate their public-private key pairs, to issue certificates, and to perform authentication regardless of the network partitions and without any centralized services. Furthermore, our approach does not require any trusted authority, not even in the system initialization phase},
  www_section = {ad-hoc networks, key authentication, PGP, public key cryptography, self-organization},
  issn = {1536-1233},
  doi = {10.1109/TMC.2003.1195151},
  url = {http://portal.acm.org/citation.cfm?id=766655.766661$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.1545.pdf},
}
@article{776703,
  title = {Security Performance},
  author = {Menasc{\'e}, Daniel},
  journal = {IEEE Internet Computing},
  volume = {7},
  number = {3},
  year = {2003},
  address = {Piscataway, NJ, USA},
  pages = {84--87},
  publisher = {IEEE Educational Activities Department},
  abstract = {Several protocols and mechanisms aim to enforce the various dimensions of security in applications ranging from email to e-commerce transactions. Adding such mechanisms and procedures to applications and systems does not come cheaply, however, as they impose security trade-offs in the areas of performance and scalability},
  www_section = {security policy, trade-off},
  issn = {1089-7801},
  doi = {10.1109/MIC.2003.1200305},
  url = {http://portal.acm.org/citation.cfm?id=776703$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE-IC-SecurityPerformance-May-2003.pdf},
}
@inproceedings{778418,
  title = {A charging and rewarding scheme for packet forwarding in multi-hop cellular networks},
  author = {Salem, Naouel Ben and Butty{\'a}n, Levente and Hubaux, Jean-Pierre and Jakobsson, Markus},
  booktitle = {MobiHoc '03: Proceedings of the 4th ACM international symposium on Mobile ad hoc networking \& computing},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {13--24},
  publisher = {ACM},
  abstract = {In multi-hop cellular networks, data packets have to be relayed hop by hop from a given mobile station to a base station and vice-versa. This means that the mobile stations must accept to forward information for the benefit of other stations. In this paper, we propose an incentive mechanism that is based on a charging/rewarding scheme and that makes collaboration rational for selfish nodes. We base our solution on symmetric cryptography to cope with the limited resources of the mobile stations. We provide a set of protocols and study their robustness with respect to various attacks. By leveraging on the relative stability of the routes, our solution leads to a very moderate overhead},
  www_section = {ad-hoc networks, charging, cooperation, hybrid cellular networks, multi-hop networks, packet forwarding},
  isbn = {1-58113-684-6},
  doi = {10.1145/778415.778418},
  url = {http://portal.acm.org/citation.cfm?id=778418$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BenSalemBHJ03mobihoc.pdf},
}
@article{78977, title = {Skip lists: a probabilistic alternative to balanced trees}, author = {Pugh, William}, journal = {Commun. ACM}, volume = {33}, number = {6}, year = {1990}, address = {New York, NY, USA}, pages = {668--676}, publisher = {ACM}, abstract = {Skip lists are data structures that use probabilistic balancing rather than strictly enforced balancing. As a result, the algorithms for insertion and deletion in skip lists are much simpler and significantly faster than equivalent algorithms for balanced trees}, www_section = {data structures, search}, issn = {0001-0782}, doi = {10.1145/78973.78977}, url = {http://portal.acm.org/citation.cfm?id=78977$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.85.9211.pdf}, }
@conference{792432, title = {Supporting Peer-to-Peer Computing with FlexiNet}, author = {Thomas Fuhrmann}, booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster Computing and the Grid}, organization = {IEEE Computer Society}, year = {2003}, address = {Washington, DC, USA}, pages = {0--390}, publisher = {IEEE Computer Society}, abstract = {Formation of suitable overlay-network topologiesthat are able to reflect the structure of the underlying network-infrastructure, has rarely been addressedby peer-to-peer applications so far. Often, peer-to-peerprotocols restrain to purely random formation of theiroverlay-network. This leads to a far from optimal performance of such peer-to-peer networks and ruthlesslywastes network resources.In this paper, we describe a simple mechanism thatuses programmable network technologies to improvethe topology formation process of unstructured peer-to-peer networks. Being a network service, our mechanismdoes not require any modification of existing applications or computing systems. By that, it assists networkoperators with improving the performance of their network and relieves programmers from the burden of designing and implementing topology-aware peer-to-peerprotocols.Although we use the well-know Gnutella protocol todescribe the mechanism of our proposed service, it applies to all kinds of unstructured global peer-to-peercomputing applications}, www_section = {overlay networks, programmable networks, topology matching}, isbn = {0-7695-1919-9}, url = {http://portal.acm.org/citation.cfm?id=791231.792432$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03supportingP2P.pdf}, }
@conference{792493, title = {An Overlay-Network Approach for Distributed Access to SRS}, author = {Thomas Fuhrmann and Andrea Schafferhans and Etzold, Thure}, booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster Computing and the Grid}, organization = {IEEE Computer Society}, year = {2003}, address = {Washington, DC, USA}, pages = {0--601}, publisher = {IEEE Computer Society}, abstract = {SRS is a widely used system for integrating biologicaldatabases. Currently, SRS relies only on locally providedcopies of these databases. In this paper we propose a mechanism that also allows the seamless integration of remotedatabases. To this end, our proposed mechanism splits theexisting SRS functionality into two components and addsa third component that enables us to employ peer-to-peercomputing techniques to create optimized overlay-networkswithin which database queries can efficiently be routed. Asan additional benefit, this mechanism also reduces the administration effort that would be needed with a conventionalapproach using replicated databases}, www_section = {overlay networks, P2P, SRS}, isbn = {0-7695-1919-9}, url = {http://portal.acm.org/citation.cfm?id=792493$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03overlaySRS.pdf}, }
@conference{820485, title = {Energy-Efficient Communication Protocol for Wireless Microsensor Networks}, author = {Heinzelman, Wendi Rabiner and Chandrakasan, Anantha and Hari Balakrishnan}, booktitle = {HICSS '00: Proceedings of the 33rd Hawaii International Conference on System Sciences-Volume 8}, organization = {IEEE Computer Society}, year = {2000}, address = {Washington, DC, USA}, pages = {0--8020}, publisher = {IEEE Computer Society}, abstract = {Wireless distributed micro-sensor systems will enable the reliable monitoring of a variety of environments for both civil and military applications. In this paper, we look at communication protocols, which can have significant impact on the overall energy dissipation of these networks.Based on our findings that the conventional protocols of direct transmission, minimum-transmission-energy, multihop routing, and static clustering may not be optimal for sensor networks, we propose LEACH (Low-Energy Adaptive Clustering Hierarchy), a clustering-based protocol that utilizes randomized rotation of local cluster base stations (cluster-heads) to evenly distribute the energy load among the sensors in the network. LEACH uses localized coordination to enable scalability and robustness for dynamic net-works, and incorporates data fusion into the routing protocol to reduce the amount of information that must be transmitted to the base station. Simulations show that LEACH can achieve as much as a factor of 8 reduction in energy dissipation compared with conventional routing protocols. In addition, LEACH is able to distribute energy dissipation evenly throughout the sensors, doubling the useful system lifetime for the networks we simulated}, www_section = {Low-Energy Adaptive Clustering Hierarchy, mobile Ad-hoc networks, routing, wireless sensor network}, isbn = {0-7695-0493-0}, url = {http://portal.acm.org/citation.cfm?id=820485$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.8499.pdf}, }
@article{844156, title = {Secure routing for structured peer-to-peer overlay networks}, author = {Miguel Castro and Peter Druschel and Ganesh, Ayalvadi and Antony Rowstron and Dan S. Wallach}, journal = {SIGOPS Oper. Syst. Rev}, volume = {36}, number = {SI}, year = {2002}, address = {New York, NY, USA}, pages = {299--314}, publisher = {ACM}, abstract = {Structured peer-to-peer overlay networks provide a substrate for the construction of large-scale, decentralized applications, including distributed storage, group communication, and content distribution. These overlays are highly resilient; they can route messages correctly even when a large fraction of the nodes crash or the network partitions. But current overlays are not secure; even a small fraction of malicious nodes can prevent correct message delivery throughout the overlay. This problem is particularly serious in open peer-to-peer systems, where many diverse, autonomous parties without preexisting trust relationships wish to pool their resources. This paper studies attacks aimed at preventing correct message delivery in structured peer-to-peer overlays and presents defenses to these attacks. We describe and evaluate techniques that allow nodes to join the overlay, to maintain routing state, and to forward messages securely in the presence of malicious nodes}, www_section = {P2P, resilient overlay network}, issn = {0163-5980}, doi = {10.1145/844128.844156}, url = {http://portal.acm.org/citation.cfm?id=844156$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/osdi2002.pdf}, }
@conference{863960, title = {A delay-tolerant network architecture for challenged internets}, author = {Fall, Kevin}, booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications, technologies, architectures, and protocols for computer communications}, organization = {ACM}, year = {2003}, address = {New York, NY, USA}, pages = {27--34}, publisher = {ACM}, abstract = {The highly successful architecture and protocols of today's Internet may operate poorly in environments characterized by very long delay paths and frequent network partitions. These problems are exacerbated by end nodes with limited power or memory resources. Often deployed in mobile and extreme environments lacking continuous connectivity, many such networks have their own specialized protocols, and do not utilize IP. To achieve interoperability between them, we propose a network architecture and application interface structured around optionally-reliable asynchronous message forwarding, with limited expectations of end-to-end connectivity and node resources. The architecture operates as an overlay above the transport layers of the networks it interconnects, and provides key services such as in-network data storage and retransmission, interoperable naming, authenticated forwarding and a coarse-grained class of service}, isbn = {1-58113-735-4}, doi = {10.1145/863955.863960}, url = {http://portal.acm.org/citation.cfm?id=863960$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IRB-TR-03-003.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{864000, title = {Making gnutella-like P2P systems scalable}, author = {Chawathe, Yatin and Breslau, Lee and Lanham, Nick and S Shenker}, booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications, technologies, architectures, and protocols for computer communications}, organization = {ACM}, year = {2003}, address = {New York, NY, USA}, pages = {407--418}, publisher = {ACM}, abstract = {Napster pioneered the idea of peer-to-peer file sharing, and supported it with a centralized file search facility. Subsequent P2P systems like Gnutella adopted decentralized search algorithms. However, Gnutella's notoriously poor scaling led some to propose distributed hash table solutions to the wide-area file search problem. Contrary to that trend, we advocate retaining Gnutella's simplicity while proposing new mechanisms that greatly improve its scalability. Building upon prior research [1, 12, 22], we propose several modifications to Gnutella's design that dynamically adapt the overlay topology and the search algorithms in order to accommodate the natural heterogeneity present in most peer-to-peer systems. We test our design through simulations and the results show three to five orders of magnitude improvement in total system capacity. We also report on a prototype implementation and its deployment on a testbed}, www_section = {distributed hash table, Gnutella, P2P}, isbn = {1-58113-735-4}, doi = {10.1145/863955.864000}, url = {http://portal.acm.org/citation.cfm?id=864000$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.5444.pdf}, }
@conference{873217, title = {Improving Data Availability through Dynamic Model-Driven Replication in Large Peer-to-Peer Communities}, author = {Ranganathan, Kavitha and Iamnitchi, Adriana and Foster, Ian}, booktitle = {CCGRID '02: Proceedings of the 2nd IEEE/ACM International Symposium on Cluster Computing and the Grid}, organization = {IEEE Computer Society}, year = {2002}, address = {Washington, DC, USA}, pages = {0--376}, publisher = {IEEE Computer Society}, abstract = {Efficient data sharing in global peer-to-peer systems is complicated by erratic node failure, unreliable networkconnectivity and limited bandwidth.Replicating data onmultiple nodes can improve availability and response time.Yet determining when and where to replicate data in orderto meet performance goals in large-scale systems withmany users and files, dynamic network characteristics, and changing user behavior is difficult.We propose anapproach in which peers create replicas automatically in a decentralized fashion, as required to meet availabilitygoals.The aim of our framework is to maintain a thresholdlevel of availability at all times.We identify a set of factors that hinder data availabilityand propose a model that decides when more replication isnecessary.We evaluate the accuracy and performance ofthe proposed model using simulations.Our preliminaryresults show that the model is effective in predicting therequired number of replicas in the system}, www_section = {data sharing, model-driven, P2P}, isbn = {0-7695-1582-7}, url = {http://portal.acm.org/citation.cfm?id=873217$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.909.pdf}, }
@booklet{896561, title = {On the Scaling of Feedback Algorithms for Very Large Multicast Groups}, author = {Thomas Fuhrmann}, year = {2000}, publisher = {University of Mannheim}, abstract = {Feedback from multicast group members is vital for many multicast protocols. In order to avoid feedback implosion in very large groups feedback algorithms with well behaved scaling-properties must be chosen. In this paper we analyse the performance of three typical feedback algorithms described in the literature. Apart from the basic trade-off between feedback latency and response duplicates we especially focus on the algorithms' sensitivity to the quality of the group size estimation. Based on this analysis we give recommendations for the choice of well behaved feedback algorithms that are suitable for very large groups}, url = {http://portal.acm.org/citation.cfm?id=896561$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fuhrmann2001a.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{898770, title = {Libckpt: Transparent Checkpointing under Unix}, author = {James S. Plank and Beck, Micah and Kingsley, Gerry and Li, Kai}, year = {1994}, address = {Knoxville, TN, USA}, publisher = {University of Tennessee}, abstract = {Checkpointing is a simple technique for rollback recovery: the state of an executing program is periodically saved to a disk file from which it can be recovered after a failure. While recent research has developed a collection of powerful techniques for minimizing the overhead of writing checkpoint files, checkpointing remains unavailable to most application developers. In this paper we describe libckpt, a portable checkpointing tool for Unix that implements all applicable performance optimizations which are reported in the literature. While libckpt can be used in a mode which is almost totally transparent to the programmer, it also supports the incorporation of user directives into the creation of checkpoints. This user-directed checkpointing is an innovation which is unique to our work. 1 Introduction Consider a programmer who has developed an application which will take a long time to execute, say five days. Two days into the computation, the processor on which the application is}, www_section = {checkpointing, performance analysis}, url = {http://portal.acm.org/citation.cfm?id=898770$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.55.257.pdf}, }
@phdthesis{937250, title = {A scalable content-addressable network}, author = {Sylvia Paul Ratnasamy}, school = {University of California, Berkeley}, year = {2002}, type = {{PhD} thesis}, note = {Chair-Shenker, Scott and Chair-Stoica, Ion}, www_section = {CAN, distributed hash table}, url = {http://www.icir.org/sylvia/thesis.ps}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can.pdf}, }
@conference{939011, title = {Ad hoc-VCG: a truthful and cost-efficient routing protocol for mobile ad hoc networks with selfish agents}, author = {Anderegg, Luzi and Eidenbenz, Stephan}, booktitle = {MobiCom '03: Proceedings of the 9th annual international conference on Mobile computing and networking}, organization = {ACM}, year = {2003}, address = {New York, NY, USA}, pages = {245--259}, publisher = {ACM}, abstract = {We introduce a game-theoretic setting for routing in a mobile ad hoc network that consists of greedy, selfish agents who accept payments for forwarding data for other agents if the payments cover their individual costs incurred by forwarding data. In this setting, we propose Ad hoc-VCG, a reactive routing protocol that achieves the design objectives of truthfulness (i.e., it is in the agents' best interest to reveal their true costs for forwarding data) and cost-efficiency (i.e., it guarantees that routing is done along the most cost-efficient path) in a game-theoretic sense by paying to the intermediate nodes a premium over their actual costs for forwarding data packets. We show that the total overpayment (i.e., the sum of all premiums paid) is relatively small by giving a theoretical upper bound and by providing experimental evidence. Our routing protocol implements a variation of the well-known mechanism by Vickrey, Clarke, and Groves in a mobile network setting. Finally, we analyze a very natural routing protocol that is an adaptation of the Packet Purse Model [8] with auctions in our setting and show that, unfortunately, it does not achieve cost-efficiency or truthfulness}, www_section = {ad-hoc networks, energy efficiency, game theory, mechanism design, routing, selfish agents, VCG mechanism}, isbn = {1-58113-753-2}, doi = {10.1145/938985.939011}, url = {http://portal.acm.org/citation.cfm?id=939011$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.7483.pdf}, }
@article{939859, title = {Wireless Community Networks}, author = {Jain, Saurabh and Agrawal, Dharma P.}, journal = {Computer}, volume = {36}, number = {8}, year = {2003}, address = {Los Alamitos, CA, USA}, pages = {90--92}, publisher = {IEEE Computer Society Press}, issn = {0018-9162}, doi = {10.1109/MC.2003.1220588}, url = {http://portal.acm.org/citation.cfm?id=939824.939859$\#$}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{942421, title = {Stimulating cooperation in self-organizing mobile ad hoc networks}, author = {Levente Butty{\'a}n and Jean-Pierre Hubaux}, journal = {Mob. Netw. Appl}, volume = {8}, number = {5}, year = {2003}, address = {Hingham, MA, USA}, pages = {579--592}, publisher = {Kluwer Academic Publishers}, abstract = {In military and rescue applications of mobile ad hoc networks, all the nodes belong to the same authority; therefore, they are motivated to cooperate in order to support the basic functions of the network. In this paper, we consider the case when each node is its own authority and tries to maximize the benefits it gets from the network. More precisely, we assume that the nodes are not willing to forward packets for the benefit of other nodes. This problem may arise in civilian applications of mobile ad hoc networks. In order to stimulate the nodes for packet forwarding, we propose a simple mechanism based on a counter in each node. We study the behavior of the proposed mechanism analytically and by means of simulations, and detail the way in which it could be protected against misuse}, www_section = {ad-hoc networks, cooperation, self-organization}, issn = {1383-469X}, doi = {10.1023/A:1025146013151}, url = {http://portal.acm.org/citation.cfm?id=942421$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ButtyanH03monet.pdf}, }
@conference{948119, title = {Establishing pairwise keys in distributed sensor networks}, author = {Liu, Donggang and Peng Ning}, booktitle = {CCS '03: Proceedings of the 10th ACM conference on Computer and communications security}, organization = {ACM}, year = {2003}, address = {New York, NY, USA}, pages = {52--61}, publisher = {ACM}, abstract = {Pairwise key establishment is a fundamental security service in sensor networks; it enables sensor nodes to communicate securely with each other using cryptographic techniques. However, due to the resource constraints on sensors, it is infeasible to use traditional key management techniques such as public key cryptography and key distribution center (KDC). To facilitate the study of novel pairwise key predistribution techniques, this paper presents a general framework for establishing pairwise keys between sensors on the basis of a polynomial-based key predistribution protocol [2]. This paper then presents two efficient instantiations of the general framework: a random subset assignment key predistribution scheme and a grid-based key predistribution scheme. The analysis in this paper indicates that these two schemes have a number of nice properties, including high probability (or guarantee) to establish pairwise keys, tolerance of node captures, and low communication overhead. Finally, this paper presents a technique to reduce the computation at sensors required by these schemes}, www_section = {key management, probabilistic key sharing, sensor networks}, isbn = {1-58113-738-9}, doi = {10.1145/948109.948119}, url = {http://portal.acm.org/citation.cfm?id=948119$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs03-SNKeyMan.pdf}, }
@conference{958494, title = {Taming the underlying challenges of reliable multihop routing in sensor networks}, author = {Woo, Alec and Tong, Terence and Culler, David}, booktitle = {SenSys '03: Proceedings of the 1st international conference on Embedded networked sensor systems}, organization = {ACM}, year = {2003}, address = {New York, NY, USA}, pages = {14--27}, publisher = {ACM}, abstract = {The dynamic and lossy nature of wireless communication poses major challenges to reliable, self-organizing multihop networks. These non-ideal characteristics are more problematic with the primitive, low-power radio transceivers found in sensor networks, and raise new issues that routing protocols must address. Link connectivity statistics should be captured dynamically through an efficient yet adaptive link estimator and routing decisions should exploit such connectivity statistics to achieve reliability. Link status and routing information must be maintained in a neighborhood table with constant space regardless of cell density. We study and evaluate link estimator, neighborhood table management, and reliable routing protocol techniques. We focus on a many-to-one, periodic data collection workload. We narrow the design space through evaluations on large-scale, high-level simulations to 50-node, in-depth empirical experiments. The most effective solution uses a simple time averaged EWMA estimator, frequency based table management, and cost-based routing}, www_section = {link estimation, multi-hop networks, neighborhood management, reliability, sensor networks}, isbn = {1-58113-707-9}, doi = {10.1145/958491.958494}, url = {http://portal.acm.org/citation.cfm?id=958494$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p14-woo.pdf}, }
@article{987233, title = {Internet indirection infrastructure}, author = {Ion Stoica and Adkins, Daniel and Shelley Zhuang and S Shenker and Surana, Sonesh}, journal = {IEEE/ACM Trans. Netw}, volume = {12}, number = {2}, year = {2004}, address = {Piscataway, NJ, USA}, pages = {205--218}, publisher = {IEEE Press}, abstract = {Attempts to generalize the Internet's point-to-point communication abstraction to provide services like multicast, anycast, and mobility have faced challenging technical problems and deployment barriers. To ease the deployment of such services, this paper proposes a general, overlay-based Internet Indirection Infrastructure (i3) that offers a rendezvous-based communication abstraction. Instead of explicitly sending a packet to a destination, each packet is associated with an identifier; this identifier is then used by the receiver to obtain delivery of the packet. This level of indirection decouples the act of sending from the act of receiving, and allows i3 to efficiently support a wide variety of fundamental communication services. To demonstrate the feasibility of this approach, we have designed and built a prototype based on the Chord lookup protocol}, www_section = {indirection, mobility, multicast, network infrastructure, service composition}, issn = {1063-6692}, doi = {10.1109/TNET.2004.826279}, url = {http://portal.acm.org/citation.cfm?id=987233$\#$}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/i3.pdf}, }
@article{9999, title = {The Gnutella Protocol Specification v0.4}, author = {TODO}, journal = {unknown}, year = {2001}, abstract = {A brief description of the gnutella protocol}, url = {http://www9.limewire.com/developer/gnutella_protocol_0.4.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Aad06packetcoding, title = {Packet coding for strong anonymity in ad hoc networks}, author = {Imad Aad and Claude Castelluccia and Jean-Pierre Hubaux}, year = {2006}, abstract = {Several techniques to improve anonymity have been proposed in the literature. They rely basically on multicast or on onion routing to thwart global attackers or local attackers respectively. None of the techniques provide a combined solution due to the incompatibility between the two components, as we show in this paper. We propose novel packet coding techniques that make the combination possible, thus integrating the advantages in a more complete and robust solution}, www_section = {anonymity, onion routing, robustness}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.88.2407}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2407_0.pdf}, }
@booklet{Aberer03p-grid:a, title = {P-Grid: A Self-organizing Structured P2P System}, author = {Karl Aberer and Philippe Cudre-Mauroux and Anwitaman Datta and Zoran Despotovic and Manfred Hauswirth and Magdalena Punceva and Roman Schmidt}, year = {2003}, abstract = {this paper was supported in part by the National Competence Center in Research on Mobile Information and Communication Systems (NCCR-MICS), a center supported by the Swiss National Science Foundation under grant number 5005-67322 and by SNSF grant 2100064994, "Peer-to-Peer Information Systems." messages. From the responses it (randomly) selects certain peers to which direct network links are established}, www_section = {P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.5649}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.5649.pdf}, }
@conference{Aberer04multifacetedsimultaneous, title = {Multifaceted Simultaneous Load Balancing in DHT-based P2P systems: A new game with old balls and bins}, author = {Karl Aberer and Anwitaman Datta and Manfred Hauswirth}, booktitle = {Self-* Properties in Complex Information Systems, {\textquotedblleft}Hot Topics{\textquotedblright} series, LNCS}, organization = {Springer}, year = {2004}, publisher = {Springer}, abstract = {In this paper we present and evaluate uncoordinated on-line algorithms for simultaneous storage and replication load-balancing in DHT-based peer-to-peer systems. We compare our approach with the classical balls into bins model, and point out the similarities but also the differences which call for new loadbalancing mechanisms specifically targeted at P2P systems. Some of the peculiarities of P2P systems, which make our problem even more challenging are that both the network membership and the data indexed in the network is dynamic, there is neither global coordination nor global information to rely on, and the load-balancing mechanism ideally should not compromise the structural properties and thus the search efficiency of the DHT, while preserving the semantic information of the data (e.g., lexicographic ordering to enable range searches)}, www_section = {distributed hash table, P2P, storage}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.3746}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/self-star-load-balance.pdf}, }
@conference{Acedanski05howgood, title = {How good is random linear coding based distributed networked storage?}, author = {Szymon Aceda{\'n}ski and Supratim Deb and Muriel M{\'e}dard and Ralf Koetter}, booktitle = {NetCod'05--First Workshop on Network Coding, Theory, and Applications}, organization = {Citeseer}, year = {2005}, month = {April}, address = {Riva del Garda, Italy}, publisher = {Citeseer}, abstract = {We consider the problem of storing a large file or multiple large files in a distributed manner over a network. In the framework we consider, there are multiple storage locations, each of which only have very limited storage space for each file. Each storage location chooses a part (or a coded version of the parts) of the file without the knowledge of what is stored in the other locations. We want a file-downloader to connect to as few storage locations as possible and retrieve the entire file. We compare the performance of three strategies: uncoded storage, traditional erasure coding based storage, random linear coding based storage motivated by network coding. We demonstrate that, in principle, a traditional erasure coding based storage (eg: Reed-Solomon Codes) strategy can almost do as well as one can ask for with appropriate choice of parameters. However, the cost is a large amount of additional storage space required at the centralized server before distribution among multiple locations. The random linear coding based strategy performs as well without suffering from any such disadvantage. Further, with a probability close to one, the minimum number of storage location a downloader needs to connect to (for reconstructing the entire file), can be very close to the case where there is complete coordination between the storage locations and the downloader. 
We also argue that an uncoded strategy performs poorly}, www_section = {distributed networked storage, limited storage, linear coding, multiple storage locations}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetCod\%2705\%20-\%20Random\%20linear\%20coding\%20based\%20distributed\%20networked\%20storage.pdf}, }
@conference{Acquisti03onthe, title = {On the Economics of Anonymity}, author = {Alessandro Acquisti and Roger Dingledine and Paul Syverson}, booktitle = {Financial Cryptography. Springer-Verlag, LNCS 2742}, year = {2003}, pages = {84--102}, abstract = {Decentralized anonymity infrastructures are still not in wide use today. While there are technical barriers to a secure robust design, our lack of understanding of the incentives to participate in such systems remains a major roadblock. Here we explore some reasons why anonymity systems are particularly hard to deploy, enumerate the incentives to participate either as senders or also as nodes, and build a general model to describe the effects of these incentives. We then describe and justify some simplifying assumptions to make the model manageable, and compare optimal strategies for participants based on a variety of scenarios}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.13.5636\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.5636.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Acquisti04privacyin, title = {Privacy in Electronic Commerce and the Economics of Immediate Gratification}, author = {Alessandro Acquisti}, year = {2004}, abstract = {Dichotomies between privacy attitudes and behavior have been noted in the literature but not yet fully explained. We apply lessons from the research on behavioral economics to understand the individual decision making process with respect to privacy in electronic commerce. We show that it is unrealistic to expect individual rationality in this context. Models of self-control problems and immediate gratification offer more realistic descriptions of the decision process and are more consistent with currently available data. In particular, we show why individuals who may genuinely want to protect their privacy might not do so because of psychological distortions well documented in the behavioral literature; we show that these distortions may affect not only {\textquoteleft}na{\"\i}ve' individuals but also {\textquoteleft}sophisticated' ones; and we prove that this may occur also when individuals perceive the risks from not protecting their privacy as significant}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.3760\&rep=rep1\&type=pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{Adya:2002:FFA:844128.844130, title = {FARSITE: Federated, Available, and Reliable Storage for an Incompletely Trusted Environment}, author = {Adya, Atul and Bolosky, William J. and Miguel Castro and Cermak, Gerald and Chaiken, Ronnie and John R. Douceur and Howell, Jon and Lorch, Jacob R. and Marvin Theimer and Roger Wattenhofer}, journal = {ACM SIGOPS Operating Systems Review}, volume = {36}, year = {2002}, month = {December}, address = {New York, NY, USA}, pages = {1--14}, publisher = {ACM}, abstract = {Farsite is a secure, scalable file system that logically functions as a centralized file server but is physically distributed among a set of untrusted computers. Farsite provides file availability and reliability through randomized replicated storage; it ensures the secrecy of file contents with cryptographic techniques; it maintains the integrity of file and directory data with a Byzantine-fault-tolerant protocol; it is designed to be scalable by using a distributed hint mechanism and delegation certificates for pathname translations; and it achieves good performance by locally caching file data, lazily propagating file updates, and varying the duration and granularity of content leases. We report on the design of Farsite and the lessons we have learned by implementing much of that design}, www_section = {centralized file server, farsite, file system, randomized replicated storage}, issn = {0163-5980}, doi = {10.1145/844128.844130}, url = {http://doi.acm.org/10.1145/844128.844130}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOPS\%20-\%20FARSITE.pdf}, }
@conference{Aekaterinidis2006PastryStrings,
  title = {PastryStrings: A Comprehensive Content-Based Publish/Subscribe DHT Network},
  author = {Aekaterinidis, Ioannis and Triantafillou, Peter},
  booktitle = {Proceedings of the 26th IEEE International Conference on Distributed Computing Systems},
  organization = {IEEE Computer Society},
  year = {2006},
  address = {Washington, DC, USA},
  pages = {0--23},
  publisher = {IEEE Computer Society},
  series = {ICDCS '06},
  isbn = {0-7695-2540-7},
  doi = {10.1109/ICDCS.2006.63},
  url = {http://dx.doi.org/10.1109/ICDCS.2006.63},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{AhmedBoutaba2006DistributedPatternMatching,
  title = {Distributed Pattern Matching: A Key to Flexible and Efficient P2P Search},
  author = {Ahmed, R. and Boutaba, R.},
  booktitle = {2006 IEEE/IFIP Network Operations and Management Symposium NOMS 2006},
  organization = {IEEE},
  year = {2006},
  pages = {198--208},
  publisher = {IEEE},
  abstract = {Flexibility and efficiency are the prime requirements for any P2P search mechanism. Existing P2P systems do not seem to provide satisfactory solution for achieving these two conflicting goals. Unstructured search protocols (as adopted in Gnutella and FastTrack), provide search flexibility but exhibit poor performance characteristics. Structured search techniques (mostly distributed hash table (DHT)-based), on the other hand, can efficiently route queries to target peers but support exact-match queries only. In this paper we present a novel P2P system, called distributed pattern matching system (DPMS), for enabling flexible and efficient search. Distributed pattern matching can be used to solve problems like wildcard searching (for file-sharing P2P systems), partial service description matching (for service discovery systems) etc. DPMS uses a hierarchy of indexing peers for disseminating advertised patterns. Patterns are aggregated and replicated at each level along the hierarchy. Replication improves availability and resilience to peer failure, and aggregation reduces storage overhead. An advertised pattern can be discovered using any subset of its 1-bits; this allows inexact matching and queries in conjunctive normal form. Search complexity (i.e., the number of peers to be probed) in DPMS is O (log N + zetalog N/log N), where N is the total number of peers and zeta is proportional to the number of matches, required in a search result. The impact of churn problem is less severe in DPMS than DHT-based systems. Moreover, DPMS provides guarantee on search completeness for moderately stable networks. 
We demonstrate the effectiveness of DPMS using mathematical analysis and simulation results},
  www_section = {matching, P2P, search},
  isbn = {1-4244-0142-9},
  doi = {10.1109/NOMS.2006.1687551},
  url = {http://dx.doi.org/10.1109/NOMS.2006.1687551},
}
@conference{Ahn03k-anonymousmessage,
  title = {k-Anonymous Message Transmission},
  author = {Luis von Ahn and Andrew Bortz and Nicholas J. Hopper},
  booktitle = {Conference on Computer and Communications Security},
  organization = {ACM New York, NY, USA},
  year = {2003},
  month = {January},
  address = {Washington D.C., USA},
  publisher = {ACM New York, NY, USA},
  abstract = {Informally, a communication protocol is sender k--anonymous if it can guarantee that an adversary, trying to determine the sender of a particular message, can only narrow down its search to a set of k suspects. Receiver k-anonymity places a similar guarantee on the receiver: an adversary, at best, can only narrow down the possible receivers to a set of size k. In this paper we introduce the notions of sender and receiver k-anonymity and consider their applications. We show that there exist simple and efficient protocols which are k-anonymous for both the sender and the receiver in a model where a polynomial time adversary can see all traffic in the network and can control up to a constant fraction of the participants. Our protocol is provably secure, practical, and does not require the existence of trusted third parties. This paper also provides a conceptually simple augmentation to Chaum's DC-Nets that adds robustness against adversaries who attempt to disrupt the protocol through perpetual transmission or selective non-participation},
  isbn = {1-58113-738-9},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.9348\&rep=rep1\&type=url\&i=2},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/k-anonymous_ccs2003.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{Ali:2005:PTA:1082473.1082631,
  title = {Preprocessing techniques for accelerating the DCOP algorithm ADOPT},
  author = {Ali, Syed and Koenig, Sven and Tambe, Milind},
  booktitle = {AAMAS'05--Proceedings of the fourth international joint conference on Autonomous agents and multiagent systems},
  organization = {ACM},
  year = {2005},
  month = {July},
  address = {Utrecht, Netherlands},
  pages = {1041--1048},
  publisher = {ACM},
  series = {AAMAS '05},
  abstract = {Methods for solving Distributed Constraint Optimization Problems (DCOP) have emerged as key techniques for distributed reasoning. Yet, their application faces significant hurdles in many multiagent domains due to their inefficiency. Preprocessing techniques have successfully been used to speed up algorithms for centralized constraint satisfaction problems. This paper introduces a framework of different preprocessing techniques that are based on dynamic programming and speed up ADOPT, an asynchronous complete and optimal DCOP algorithm. We investigate when preprocessing is useful and which factors influence the resulting speedups in two DCOP domains, namely graph coloring and distributed sensor networks. Our experimental results demonstrate that our preprocessing techniques are fast and can speed up ADOPT by an order of magnitude},
  www_section = {ADOPT algorithm, DCOP, distributed constraint optimization},
  isbn = {1-59593-093-0},
  doi = {10.1145/1082473.1082631},
  url = {http://doi.acm.org/10.1145/1082473.1082631},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS\%2705\%20-\%20Accelerating\%20the\%20DCOP\%20algorithm\%20ADOPT.pdf},
}
@mastersthesis{Amnefelt04keso-,
  title = {Keso--a Scalable, Reliable and Secure Read/Write Peer-to-Peer File System},
  author = {Mattias Amnefelt and Johanna Svenningsson},
  school = {KTH/Royal Institute of Technology},
  year = {2004},
  month = {May},
  address = {Stockholm},
  pages = {0--77},
  type = {Master's Thesis},
  abstract = {In this thesis we present the design of Keso, a distributed and completely decentralized file system based on the peer-to-peer overlay network DKS. While designing Keso we have taken into account many of the problems that exist in today's distributed file systems. Traditionally, distributed file systems have been built around dedicated file servers which often use expensive hardware to minimize the risk of breakdown and to handle the load. System administrators are required to monitor the load and disk usage of the file servers and to manually add clients and servers to the system. Another drawback with centralized file systems are that a lot of storage space is unused on clients. Measurements we have taken on existing computer systems has shown that a large part of the storage capacity of workstations is unused. In the system we looked at there was three times as much storage space available on workstations than was stored in the distributed file system. We have also shown that much data stored in a production use distributed file system is redundant. The main goals for the design of Keso has been that it should make use of spare resources, avoid storing unnecessarily redundant data, scale well, be self-organizing and be a secure file system suitable for a real world environment. By basing Keso on peer-to-peer techniques it becomes highly scalable, fault tolerant and self-organizing. Keso is intended to run on ordinary workstations and can make use of the previously unused storage space. Keso also provides means for access control and data privacy despite being built on top of untrusted components. 
The file system utilizes the fact that a lot of data stored in traditional file systems is redundant by letting all files that contains a datablock with the same contents reference the same datablock in the file system. This is achieved while still maintaining access control and data privacy},
  www_section = {decentralized file system, DKS, Keso},
  url = {http://mattias.amnefe.lt/keso/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amnefelt\%20\%26\%20Svenningsson\%20-\%20Keso.pdf},
}
@conference{Anderson96theeternity,
  title = {The Eternity Service},
  author = {Ross Anderson},
  booktitle = {Pragocrypt'96--Proceedings of the 1st International Conference on the Theory and Applications of Cryptology},
  year = {1996},
  month = {September},
  address = {Prague, CZ},
  pages = {242--252},
  abstract = {The Internet was designed to provide a communications channel that is as resistant to denial of service attacks as human ingenuity can make it. In this note, we propose the construction of a storage medium with similar properties. The basic idea is to use redundancy and scattering techniques to replicate data across a large set of machines (such as the Internet), and add anonymity mechanisms to drive up the cost of selective service denial attacks. The detailed design of this service is an interesting scientific problem, and is not merely academic: the service may be vital in safeguarding individual rights against new threats posed by the spread of electronic publishing},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.1952\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/eternity.pdf},
  www_section = {Unsorted},
}
@conference{Andrade04whencan,
  title = {When Can an Autonomous Reputation Scheme Discourage Free-riding in a Peer-to-Peer System?},
  author = {Nazareno Andrade and Miranda Mowbray and Walfredo Cirne and Francisco Brasileiro},
  booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on Cluster Computing and the Grid},
  year = {2004},
  pages = {440--448},
  publisher = {IEEE Computer Society},
  abstract = {We investigate the circumstances under which it is possible to discourage free-riding in a peer-to-peer system for resource-sharing by prioritizing resource allocation to peers with higher reputation. We use a model to predict conditions necessary for any reputation scheme to succeed in discouraging free-riding by this method. We show with simulations that for representative cases, a very simple autonomous reputation scheme works nearly as well at discouraging free-riding as an ideal reputation scheme. Finally, we investigate the expected dynamic behavior of the system},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.9659\&rep=rep1\&type=pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{Andrade:2005:ICB:1080192.1080198,
  title = {Influences on cooperation in BitTorrent communities},
  author = {Andrade, Nazareno and Mowbray, Miranda and Lima, Aliandro and Wagner, Gustavo and Ripeanu, Matei},
  booktitle = {P2PEcon'05. Proceedings of the 2005 ACM SIGCOMM workshop on Economics of peer-to-peer systems},
  organization = {ACM},
  year = {2005},
  month = {August},
  address = {Philadelphia, Pennsylvania, USA},
  pages = {111--115},
  publisher = {ACM},
  series = {P2PECON '05},
  abstract = {We collect BitTorrent usage data across multiple file-sharing communities and analyze the factors that affect users' cooperative behavior. We find evidence that the design of the BitTorrent protocol results in increased cooperative behavior over other P2P protocols used to share similar content (e.g. Gnutella). We also investigate two additional community-specific mechanisms that foster even more cooperation},
  www_section = {BitTorrent, cooperation, P2P},
  isbn = {1-59593-026-4},
  doi = {10.1145/1080192.1080198},
  url = {http://doi.acm.org/10.1145/1080192.1080198},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coopbittorrentcom_0.pdf},
}
@article{Androutsellis-Theotokis:2004:SPC:1041680.1041681,
  title = {A survey of peer-to-peer content distribution technologies},
  author = {Androutsellis-Theotokis, Stephanos and Spinellis, Diomidis},
  journal = {ACM Computing Surveys},
  volume = {36},
  year = {2004},
  month = {December},
  address = {New York, NY, USA},
  pages = {335--371},
  publisher = {ACM},
  abstract = {Distributed computer architectures labeled "peer-to-peer" are designed for the sharing of computer resources (content, storage, CPU cycles) by direct exchange, rather than requiring the intermediation or support of a centralized server or authority. Peer-to-peer architectures are characterized by their ability to adapt to failures and accommodate transient populations of nodes while maintaining acceptable connectivity and performance. Content distribution is an important peer-to-peer application on the Internet that has received considerable research attention. Content distribution applications typically allow personal computers to function in a coordinated manner as a distributed storage medium by contributing, searching, and obtaining digital content. In this survey, we propose a framework for analyzing peer-to-peer content distribution technologies. 
Our approach focuses on nonfunctional characteristics such as security, scalability, performance, fairness, and resource management potential, and examines the way in which these characteristics are reflected in---and affected by---the architectural design decisions adopted by current peer-to-peer systems. We study current peer-to-peer systems and infrastructure technologies in terms of their distributed object location and routing mechanisms, their approach to content replication, caching and migration, their support for encryption, access control, authentication and identity, anonymity, deniability, accountability and reputation, and their use of resource trading and management schemes},
  www_section = {content distribution, distributed hash table, DOLR, grid computing, P2P, peer-to-peer networking},
  issn = {0360-0300},
  doi = {10.1145/1041680.1041681},
  url = {http://doi.acm.org/10.1145/1041680.1041681},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20Computing\%20Surveys\%20-\%20A\%20survey\%20of\%20p2p\%20content\%20distribution\%20technologies.pdf},
}
@conference{Antoniadis04anasymptotically,
  title = {An Asymptotically Optimal Scheme for P2P File Sharing},
  author = {Panayotis Antoniadis and Costas Courcoubetis and Richard Weber},
  booktitle = {2nd Workshop on the Economics of Peer-to-Peer Systems},
  year = {2004},
  month = {January},
  address = {Harvard University},
  abstract = {The asymptotic analysis of certain public good models for p2p systems suggests that when the aim is to maximize social welfare a fixed contribution scheme in terms of the number of files shared can be asymptotically optimal as the number of participants grows to infinity. Such a simple scheme eliminates free riding, is incentive compatible and obtains a value of social welfare that is within o(n) of that obtained by the second-best policy of the corresponding mechanism design formulation of the problem. We extend our model to account for file popularity, and discuss properties of the resulting equilibria. The fact that a simple optimization problem can be used to closely approximate the solution of the exact model (which is in most cases practically intractable both analytically and computationally), is of great importance for studying several interesting aspects of the system. We consider the evolution of the system to equilibrium in its early life, when both peers and the system planner are still learning about system parameters. We also analyse the case of group formation when peers belong to different classes (such as DSL and dial-up users), and it may be to their advantage to form distinct groups instead of a larger single group, or form such a larger group but avoid disclosing their class. 
We finally discuss the game that occurs when peers know that a fixed fee will be used, but the distribution of their valuations is unknown to the system designer},
  www_section = {asymptotically optimal, P2P, sharing},
  url = {http://www.eecs.harvard.edu/p2pecon/confman/papers},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/optimalscheme04.pdf},
}
@mastersthesis{Aspelund05retrivabilityof,
  title = {Retrivability of data in ad-hoc backup},
  author = {Trond Aspelund},
  school = {Oslo University},
  year = {2005},
  type = {Master thesis},
  abstract = {This master thesis looks at aspects with backup of data and restore in ad-hoc networks. Ad-hoc networks are networks made between arbitrary nodes without any form of infrastructure or central control. Backup in such environments would have to rely on other nodes to keep backups. The key problem is knowing whom to trust. Backup in ad-hoc network is meant to be a method to offer extra security to data that is created outside of a controlled environment. The most important aspects of backup are the ability to retrieve data after it is lost from the original device. In this project an ad-hoc network is simulated, to measure how much of the data can be retrieved as a function of the size of the network. The distance to the data and how many of the distributed copies are available is measured. The network is simulated using User-mode Linux and the centrality and connectivity of the simulated network is measured. Finding the device that keeps your data when a restoration is needed can be like looking for a needle in a haystack. A simple solution to this is to not only rely on the ad-hoc network but also make it possible for devices that keep backups to upload data to others or back to a host that is available to the source itself},
  www_section = {ad-hoc networks},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.106.141},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Retrivability_of_data_in_ad-hoc_backup.pdf},
}
@conference{Atallah2006,
  title = {Secure Collaborative Planning, Forecasting, and Replenishment},
  author = {Atallah, Mikhail and Marina Blanton and Vinayak Deshpand and Frikken, Keith and Li, Jiangtao and Leroy Schwarz},
  booktitle = {Proceedings of Multi-Echelon/Public Applications of Supply Chain Management Conference},
  year = {2006},
  pages = {1--52},
  note = {only published on CD},
  abstract = {Although the benefits of information sharing between supply-chain partners are well known, many companies are averse to share their {\textquotedblleft}private{\textquotedblright} information due to fear of adverse impact of information leakage. This paper uses techniques from Secure Multiparty Computation (SMC) to develop {\textquotedblleft}secure protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting, and Replenishment) business process. The result is a process that permits supply-chain partners to capture all of the benefits of information-sharing and collaborative decision-making, but without disclosing their {\textquotedblleft}private{\textquotedblright} demandsignal (e.g., promotions) and cost information to one another. In our collaborative CPFR) scenario, the retailer and supplier engage in SMC protocols that result in: (1) a forecast that uses both the retailers and the suppliers observed demand signals to better forecast demand; and (2) prescribed order/shipment quantities based on system-wide costs and inventory levels (and on the joint forecasts) that minimize supply-chain expected cost/period. 
Our contributions are as follows: (1) we demonstrate that CPFR can be securely implemented without disclosing the private information of either partner; (2) we show that the CPFR business process is not incentive compatible without transfer payments and develop an incentive-compatible linear transfer-payment scheme for collaborative forecasting; (3) we demonstrate that our protocols are not only secure (i.e., privacy preserving), but that neither partner is able to make accurate inferences about the others future demand signals from the outputs of the protocols; and (4) we illustrate the benefits of secure collaboration using simulation},
  www_section = {chain computation management, CPFR, privacy, secure multi-party computation, secure supply, security, SMC},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf},
}
@conference{Atallah:2004:PCF:1029179.1029204,
  title = {Private collaborative forecasting and benchmarking},
  author = {Atallah, Mikhail and Bykova, Marina and Li, Jiangtao and Frikken, Keith and Topkara, Mercan},
  booktitle = {WPES'04--Proceedings of the 2004 ACM workshop on Privacy in the electronic society},
  organization = {ACM},
  year = {2004},
  month = {October},
  address = {Washington, DC, USA},
  pages = {103--114},
  publisher = {ACM},
  series = {WPES '04},
  abstract = {Suppose a number of hospitals in a geographic area want to learn how their own heart-surgery unit is doing compared with the others in terms of mortality rates, subsequent complications, or any other quality metric. Similarly, a number of small businesses might want to use their recent point-of-sales data to cooperatively forecast future demand and thus make more informed decisions about inventory, capacity, employment, etc. These are simple examples of cooperative benchmarking and (respectively) forecasting that would benefit all participants as well as the public at large, as they would make it possible for participants to avail themselves of more precise and reliable data collected from many sources, to assess their own local performance in comparison to global trends, and to avoid many of the inefficiencies that currently arise because of having less information available for their decision-making. And yet, in spite of all these advantages, cooperative benchmarking and forecasting typically do not take place, because of the participants' unwillingness to share their information with others. Their reluctance to share is quite rational, and is due to fears of embarrassment, lawsuits, weakening their negotiating position (e.g., in case of over-capacity), revealing corporate performance and strategies, etc. 
The development and deployment of private benchmarking and forecasting technologies would allow such collaborations to take place without revealing any participant's data to the others, reaping the benefits of collaboration while avoiding the drawbacks. Moreover, this kind of technology would empower smaller organizations who could then cooperatively base their decisions on a much broader information base, in a way that is today restricted to only the largest corporations. This paper is a step towards this goal, as it gives protocols for forecasting and benchmarking that reveal to the participants the desired answers yet do not reveal to any participant any other participant's private data. We consider several forecasting methods, including linear regression and time series techniques such as moving average and exponential smoothing. One of the novel parts of this work, that further distinguishes it from previous work in secure multi-party computation, is that it involves floating point arithmetic, in particular it provides protocols to securely and efficiently perform division},
  www_section = {benchmarking, e-commerce, forecasting, privacy, secure multi-party computation, secure protocol, SMC},
  isbn = {1-58113-968-3},
  doi = {10.1145/1029179.1029204},
  url = {http://doi.acm.org/10.1145/1029179.1029204},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2704\%20-\%20Forecasting\%20and\%20benchamking.pdf},
}
@conference{AthanRAM07,
  title = {GAS: Overloading a File Sharing Network as an Anonymizing System},
  author = {Elias Athanasopoulos and Mema Roussopoulos and Kostas G. Anagnostakis and Evangelos P. Markatos},
  booktitle = {Proceedings of Second International Workshop on Security, (IWSEC 2007)},
  organization = {Springer Berlin / Heidelberg},
  year = {2007},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {Anonymity is considered as a valuable property as far as everyday transactions in the Internet are concerned. Users care about their privacy and they seek for new ways to keep secret as much as of their personal information from third parties. Anonymizing systems exist nowadays that provide users with the technology, which is able to hide their origin when they use applications such as the World Wide Web or Instant Messaging. However, all these systems are vulnerable to a number of attacks and some of them may collapse under a low strength adversary. In this paper we explore anonymity from a different perspective. Instead of building a new anonymizing system, we try to overload an existing file sharing system, Gnutella, and use it for a different purpose. We develop a technique that transforms Gnutella as an Anonymizing System (GAS) for a single download from the World Wide Web},
  www_section = {anonymity, Gnutella},
  isbn = {978-3-540-75650-7},
  doi = {10.1007/978-3-540-75651-4},
  url = {http://www.springerlink.com/content/8120788t0l354vj6/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AthanRAM07.pdf},
}
@conference{Attrapadung:2009:AES:1696791.1696811,
  title = {Attribute-Based Encryption Supporting Direct/Indirect Revocation Modes},
  author = {Attrapadung, Nuttapong and Imai, Hideki},
  booktitle = {Proceedings of the 12th IMA International Conference on Cryptography and Coding},
  organization = {Springer-Verlag},
  year = {2009},
  month = {December},
  address = {Cirencester, UK},
  pages = {278--300},
  publisher = {Springer-Verlag},
  series = {Cryptography and Coding '09},
  abstract = {Attribute-based encryption (ABE) enables an access control mechanism over encrypted data by specifying access policies among private keys and ciphertexts. In this paper, we focus on ABE that supports revocation. Currently, there are two available revocable ABE schemes in the literature. Their revocation mechanisms, however, differ in the sense that they can be considered as direct and indirect methods. Direct revocation enforces revocation directly by the sender who specifies the revocation list while encrypting. Indirect revocation enforces revocation by the key authority who releases a key update material periodically in such a way that only non-revoked users can update their keys (hence, revoked users' keys are implicitly rendered useless). An advantage of the indirect method over the direct one is that it does not require senders to know the revocation list. In contrast, an advantage of the direct method over the other is that it does not involve key update phase for all non-revoked users interacting with the key authority. In this paper, we present the first Hybrid Revocable ABE scheme that allows senders to select on-the-fly when encrypting whether to use either direct or indirect revocation mode; therefore, it combines best advantages from both methods},
  isbn = {978-3-642-10867-9},
  doi = {10.1007/978-3-642-10868-6_17},
  url = {http://dx.doi.org/10.1007/978-3-642-10868-6_17},
  www_section = {Unsorted},
}
@conference{Awerbuch04robustdistributed,
  title = {Robust Distributed Name Service},
  author = {Awerbuch, Baruch},
  booktitle = {Proceedings of the 3rd International Workshop on Peer-to-Peer Systems (IPTPS)},
  year = {2004},
  pages = {1--8},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.4900},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/awerbuch-robust.pdf},
  %%%%% ERROR: Missing field
  % www_section = {?????},
}
@conference{BM:mixencrypt,
  title = {Provably Secure Public-Key Encryption for Length-Preserving Chaumian Mixes},
  author = {Bodo M{\"o}ller},
  booktitle = {Proceedings of CT-RSA 2003},
  year = {2003},
  month = {April},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  volume = {2612},
  abstract = {Mix chains as proposed by Chaum allow sending untraceable electronic e-mail without requiring trust in a single authority: messages are recursively public-key encrypted to multiple intermediates (mixes), each of which forwards the message after removing one layer of encryption. To conceal as much information as possible when using variable (source routed) chains, all messages passed to mixes should be of the same length; thus, message length should not decrease when a mix transforms an input message into the corresponding output message directed at the next mix in the chain. Chaum described an implementation for such length-preserving mixes, but it is not secure against active attacks. We show how to build practical cryptographically secure length-preserving mixes. The conventional definition of security against chosen ciphertext attacks is not applicable to length-preserving mixes; we give an appropriate definition and show that our construction achieves provable security},
  www_section = {mix chain, public key cryptography},
  url = {http://eprints.kfupm.edu.sa/59837/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BM-mixencrypt.pdf},
}
@article{Badishi:2009:DFC:1550962.1551186,
  title = {Deleting files in the Celeste peer-to-peer storage system},
  author = {Badishi, Gal and Caronni, Germano and Keidar, Idit and Rom, Raphael and Scott, Glenn},
  journal = {Journal of Parallel and Distributed Computing},
  volume = {69},
  year = {2009},
  month = {July},
  address = {Orlando, FL, USA},
  pages = {613--622},
  publisher = {Academic Press, Inc},
  abstract = {Celeste is a robust peer-to-peer object store built on top of a distributed hash table (DHT). Celeste is a working system, developed by Sun Microsystems Laboratories. During the development of Celeste, we faced the challenge of complete object deletion, and moreover, of deleting ''files'' composed of several different objects. This important problem is not solved by merely deleting meta-data, as there are scenarios in which all file contents must be deleted, e.g., due to a court order. Complete file deletion in a realistic peer-to-peer storage system has not been previously dealt with due to the intricacy of the problem--the system may experience high churn rates, nodes may crash or have intermittent connectivity, and the overlay network may become partitioned at times. We present an algorithm that eventually deletes all file contents, data and meta-data, in the aforementioned complex scenarios. The algorithm is fully functional and has been successfully integrated into Celeste},
  www_section = {Celeste, fault-tolerance, peer-to-peer networking, storage},
  issn = {0743-7315},
  doi = {10.1016/j.jpdc.2009.03.003},
  url = {http://dl.acm.org/citation.cfm?id=1550962.1551186},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Parallel\%20\%26\%20Distributed\%20Computing\%20-\%20Deleting\%20files\%20in\%20the\%20Celeste\%20p2p\%20storage\%20systems.pdf},
}
@article{Banner:2007:MRA:1279660.1279673,
  title = {Multipath routing algorithms for congestion minimization},
  author = {Banner, Ron and Orda, Ariel},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {15},
  year = {2007},
  month = {April},
  address = {Piscataway, NJ, USA},
  pages = {413--424},
  publisher = {IEEE Press},
  abstract = {Unlike traditional routing schemes that route all traffic along a single path, multipath routing strategies split the traffic among several paths in order to ease congestion. It has been widely recognized that multipath routing can be fundamentally more efficient than the traditional approach of routing along single paths. Yet, in contrast to the single-path routing approach, most studies in the context of multipath routing focused on heuristic methods. We demonstrate the significant advantage of optimal (or near optimal) solutions. Hence, we investigate multipath routing adopting a rigorous (theoretical) approach. We formalize problems that incorporate two major requirements of multipath routing. Then, we establish the intractability of these problems in terms of computational complexity. Finally, we establish efficient solutions with proven performance guarantees},
  www_section = {computer networks, congestion avoidance, routing protocols},
  issn = {1063-6692},
  doi = {10.1109/TNET.2007.892850},
  url = {http://dx.doi.org/10.1109/TNET.2007.892850},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Banner\%20\%26\%20Orda.pdf},
}
@inproceedings{Barreto04areplicated, title = {A Replicated File System for Resource Constrained Mobile Devices}, author = {Jo{\~a}o Barreto and Paulo Ferreira}, booktitle = {Proceedings of IADIS Applied Computing}, year = {2004}, abstract = {The emergence of more powerful and resourceful mobile devices, as well as new wireless communication technologies, is turning the concept of ad-hoc networking into a viable and promising possibility for ubiquitous information sharing. However, the inherent characteristics of ad-hoc networks bring up new challenges for which most conventional systems don't provide an appropriate response. Namely, the lack of a pre-existing infrastructure, the high topological dynamism of these networks, the relatively low bandwidth of wireless links, as well as the limited storage and energy resources of mobile devices are issues that strongly affect the efficiency of any distributed system intended to provide ubiquitous information sharing. In this paper we describe Haddock-FS, a transparent replicated file system designed to support collaboration in the novel usage scenarios enabled by mobile environments. Haddock-FS is based on a highly available optimistic consistency protocol. In order to effectively cope with the network bandwidth and device memory constraints of these environments, Haddock-FS employs a limited size log truncation scheme and a cross-file, cross-version content similarity exploitation mechanism}, www_section = {ad-hoc networks, ubiquitous computing}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.144.9141}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.144.9141.pdf}, }
@incollection{Bartolini:2005:SFA:2167504.2167521, title = {A software framework for automated negotiation}, author = {Bartolini, Claudio and Preist, Chris and Jennings, Nicholas R.}, booktitle = {Software Engineering for Multi-Agent Systems III}, volume = {3390}, year = {2005}, address = {Berlin, Heidelberg}, pages = {213--235}, editor = {Choren, Ricardo and Garcia, Alessandro and Lucena, Carlos and Romanovsky, Alexander}, publisher = {Springer-Verlag}, series = {Lecture Notes in Computer Science}, abstract = {If agents are to negotiate automatically with one another they must share a negotiation mechanism, specifying what possible actions each party can take at any given time, when negotiation terminates, and what is the structure of the resulting agreements. Current standardization activities such as FIPA [2] and WS-Agreement [3] represent this as a negotiation protocol specifying the flow of messages. However, they omit other aspects of the rules of negotiation (such as obliging a participant to improve on a previous offer), requiring these to be represented implicitly in an agent's design, potentially resulting incompatibility, maintenance and re-usability problems. In this chapter, we propose an alternative approach, allowing all of a mechanism to be formal and explicit. We present (i) a taxonomy of declarative rules which can be used to capture a wide variety of negotiation mechanisms in a principled and well-structured way; (ii) a simple interaction protocol, which is able to support any mechanism which can be captured using the declarative rules; (iii) a software framework for negotiation that allows agents to effectively participate in negotiations defined using our rule taxonomy and protocol and (iv) a language for expressing aspects of the negotiation based on OWL-Lite [4]. 
We provide examples of some of the mechanisms that the framework can support}, www_section = {framework, negotiation}, isbn = {3-540-24843-9}, url = {http://dl.acm.org/citation.cfm?id=2167504.2167521}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SELMAS\%20-\%20Software\%20framework\%20for\%20automated\%20negotiation.pdf}, }
@inproceedings{Baset04ananalysis, title = {An Analysis of the Skype Peer-to-Peer Internet Telephony Protocol}, author = {Salman A. Baset and Henning G. Schulzrinne}, booktitle = {INFOCOM 2006. Proceedings of the 25th Annual Joint Conference of the IEEE Computer and Communications Societies}, year = {2006}, month = apr, address = {Barcelona, Catalunya, Spain}, abstract = {Skype is a peer-to-peer VoIP client developed by KaZaa in 2003. Skype claims that it can work almost seamlessly across NATs and firewalls and has better voice quality than the MSN and Yahoo IM applications. It encrypts calls end-to-end, and stores user information in a decentralized fashion. Skype also supports instant messaging and conferencing. This report analyzes key Skype functions such as login, NAT and firewall traversal, call establishment, media transfer, codecs, and conferencing under three different network setups. Analysis is performed by careful study of Skype network traffic}, www_section = {P2P, VoIP}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.84.2433}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cucs-039-04.pdf}, }
@booklet{Batten01pstore:a, title = {pStore: A Secure Peer-to-Peer Backup System}, author = {Batten, Christopher and Barr, Kenneth and Saraf, Arvind and Trepetin, Stanley}, year = {2001}, abstract = {In an effort to combine research in peer-to-peer systems with techniques for incremental backup systems, we propose pStore: a secure distributed backup system based on an adaptive peer-to-peer network. pStore exploits unused personal hard drive space attached to the Internet to provide the distributed redundancy needed for reliable and effective data backup. Experiments on a 30 node network show that 95\% of the files in a 13 MB dataset can be retrieved even when 7 of the nodes have failed. On top of this reliability, pStore includes support for file encryption, versioning, and secure sharing. Its custom versioning system permits arbitrary version retrieval similar to CVS. pStore provides this functionality at less than 10\% of the network bandwidth and requires 85\% less storage capacity than simpler local tape backup schemes for a representative workload}, www_section = {P2P, robustness}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.3444}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3444.pdf}, }
@inproceedings{Bauer03newcovert, title = {New Covert Channels in HTTP: Adding Unwitting Web Browsers to Anonymity Sets}, author = {Matthias Bauer}, booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2003)}, organization = {ACM Press}, year = {2003}, pages = {72--78}, publisher = {ACM Press}, abstract = {This paper presents new methods enabling anonymous communication on the Internet. We describe a new protocol that allows us to create an anonymous overlay network by exploiting the web browsing activities of regular users. We show that the overlay network provides an anonymity set greater than the set of senders and receivers in a realistic threat model. In particular, the protocol provides unobservability in our threat model}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.6246\&rep=rep1\&type=pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@inproceedings{BecchiCrowley2008EfficientRegexEval, title = {Efficient regular expression evaluation: theory to practice}, author = {Becchi, Michela and Crowley, Patrick}, booktitle = {Proceedings of the 4th ACM/IEEE Symposium on Architectures for Networking and Communications Systems}, organization = {ACM}, year = {2008}, address = {New York, NY, USA}, pages = {50--59}, publisher = {ACM}, series = {ANCS '08}, isbn = {978-1-60558-346-4}, doi = {10.1145/1477942.1477950}, url = {http://doi.acm.org/10.1145/1477942.1477950}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{Beimel01busesfor, title = {Buses for Anonymous Message Delivery}, author = {Amos Beimel and Shlomi Dolev}, journal = {Journal of Cryptology}, volume = {16}, number = {1}, year = {2003}, pages = {25--39}, abstract = {Applies graph theory to anonymity. The paper suffers from the fundamental problem that it does not discuss attacks on the scheme, and there are a couple of pretty basic ways to break anonymity. Also, the scheme uses lots of traffic; some variants end up looking much like a pipenet}, url = {http://gecko.cs.purdue.edu/gnet/papers/BD.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BD.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@techreport{Bellovin2007, title = {Privacy-enhanced searches using encrypted {Bloom} filters}, author = {Bellovin, Steven M. and Cheswick, William R.}, institution = {Columbia University}, number = {CUCS-034-07}, year = {2007}, pages = {1--16}, %%%%% ERROR: Missing field % www_section = {?????}, }
@inproceedings{Ben-David:2008:FSS:1455770.1455804, title = {FairplayMP: a system for secure multi-party computation}, author = {Ben-David, Assaf and Nisan, Noam and Pinkas, Benny}, booktitle = {CCS'08--Proceedings of the 15th ACM conference on Computer and communications security}, organization = {ACM}, year = {2008}, month = oct, address = {Alexandria, VA, USA}, pages = {257--266}, publisher = {ACM}, series = {CCS '08}, abstract = {We present FairplayMP (for "Fairplay Multi-Party"), a system for secure multi-party computation. Secure computation is one of the great achievements of modern cryptography, enabling a set of untrusting parties to compute any function of their private inputs while revealing nothing but the result of the function. In a sense, FairplayMP lets the parties run a joint computation that emulates a trusted party which receives the inputs from the parties, computes the function, and privately informs the parties of their outputs. FairplayMP operates by receiving a high-level language description of a function and a configuration file describing the participating parties. The system compiles the function into a description as a Boolean circuit, and perform a distributed evaluation of the circuit while revealing nothing else. FairplayMP supplements the Fairplay system [16], which supported secure computation between two parties. The underlying protocol of FairplayMP is the Beaver-Micali-Rogaway (BMR) protocol which runs in a constant number of communication rounds (eight rounds in our implementation). We modified the BMR protocol in a novel way and considerably improved its performance by using the Ben-Or-Goldwasser-Wigderson (BGW) protocol for the purpose of constructing gate tables. We chose to use this protocol since we believe that the number of communication rounds is a major factor on the overall performance of the protocol. 
We conducted different experiments which measure the effect of different parameters on the performance of the system and demonstrate its scalability. (We can now tell, for example, that running a second-price auction between four bidders, using five computation players, takes about 8 seconds.)}, www_section = {cryptography, secure multi-party computation, SMC}, isbn = {978-1-59593-810-7}, doi = {10.1145/1455770.1455804}, url = {http://doi.acm.org/10.1145/1455770.1455804}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20FairplayMP.pdf}, }
@inproceedings{Bharambe:2005:OBP:1064212.1064273, title = {Some observations on BitTorrent performance}, author = {Bharambe, Ashwin R. and Herley, Cormac and Padmanabhan, Venkata N.}, booktitle = {Proceedings of the 2005 ACM SIGMETRICS International Conference on Measurement and Modeling of Computer Systems}, organization = {ACM}, year = {2005}, month = jun, address = {New York, NY, USA}, pages = {398--399}, publisher = {ACM}, series = {SIGMETRICS '05}, abstract = {In this paper, we present a simulation-based study of BitTorrent. Our results confirm that BitTorrent performs near-optimally in terms of uplink bandwidth utilization and download time, except under certain extreme conditions. On fairness, however, our work shows that low bandwidth peers systematically download more than they upload to the network when high bandwidth peers are present. We find that the rate-based tit-for-tat policy is not effective in preventing unfairness. We show how simple changes to the tracker and a stricter, block-based tit-for-tat policy, greatly improves fairness, while maintaining high utilization}, www_section = {bandwidth utilization, BitTorrent, fairness}, isbn = {1-59593-022-1}, doi = {10.1145/1064212.1064273}, url = {http://doi.acm.org/10.1145/1064212.1064273}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGMETRICS\%2705\%20-\%20Bharambe\%2C\%20Herley\%20\%26\%20Padmanabhan.pdf}, }
@techreport{Bickson05theemule, title = {The eMule Protocol Specification}, author = {Yoram Kulbak and Danny Bickson}, institution = {Leibniz Center, School of Computer Science and Engineering, The Hebrew University}, number = {TR-2005-03}, year = {2005}, month = jan, address = {Jerusalem, Israel}, type = {Tech report}, abstract = {this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitle "GNU Free Documentation License"}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.7750}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.7750_0.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@inproceedings{Binzenhofer:2007:ECS:1769187.1769257, title = {Estimating churn in structured P2P networks}, author = {Binzenh{\"o}fer, Andreas and Leibnitz, Kenji}, booktitle = {ITC-20'07--Proceedings of the 20th International Teletraffic Conference on Managing Traffic Performance in Converged Networks}, organization = {Springer-Verlag}, year = {2007}, month = jun, address = {Ottawa, Canada}, pages = {630--641}, publisher = {Springer-Verlag}, series = {ITC20'07}, abstract = {In structured peer-to-peer (P2P) networks participating peers can join or leave the system at arbitrary times, a process which is known as churn. Many recent studies revealed that churn is one of the main problems faced by any Distributed Hash Table (DHT). In this paper we discuss different possibilities of how to estimate the current churn rate in the system. In particular, we show how to obtain a robust estimate which is independent of the implementation details of the DHT. We also investigate the trade-offs between accuracy, overhead, and responsiveness to changes}, www_section = {churn, distributed hash table, P2P, peer-to-peer networking}, isbn = {978-3-540-72989-1}, url = {http://dl.acm.org/citation.cfm?id=1769187.1769257}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ITC-20\%2707\%20-\%20Estimating\%20churn\%20in\%20structured\%20p2p\%20networks.pdf}, }
@inproceedings{Blake:2003:HAS:1251054.1251055, title = {High Availability, Scalable Storage, Dynamic Peer Networks: Pick Two}, author = {Blake, Charles and Rodrigues, Rodrigo}, booktitle = {HotOS IX--Proceedings of the 9th conference on Hot Topics in Operating Systems}, organization = {USENIX Association}, year = {2003}, month = may, address = {Lihue, Hawaii, USA}, pages = {1--1}, publisher = {USENIX Association}, abstract = {Peer-to-peer storage aims to build large-scale, reliable and available storage from many small-scale unreliable, low-availability distributed hosts. Data redundancy is the key to any data guarantees. However, preserving redundancy in the face of highly dynamic membership is costly. We use a simple resource usage model to measured behavior from the Gnutella file-sharing network to argue that large-scale cooperative storage is limited by likely dynamics and cross-system bandwidth -- not by local disk space. We examine some bandwidth optimization strategies like delayed response to failures, admission control, and load-shifting and find that they do not alter the basic problem. We conclude that when redundancy, data scale, and dynamics are all high, the needed cross-system bandwidth is unreasonable}, www_section = {distributed hosts, dynamic peer network, peer-to-peer storage, redundancy}, url = {http://dl.acm.org/citation.cfm?id=1251054.1251055}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotOS\%20IX\%20-\%20High\%20available\%2C\%20scalable\%20storage\%2C\%20dynamic\%20peer\%20networks.pdf}, }
@article{Bloom70space/timetrade-offs, title = {Space/Time Trade-offs in Hash Coding with Allowable Errors}, author = {Bloom, Burton H.}, journal = {Communications of the ACM}, volume = {13}, number = {7}, year = {1970}, month = jul, pages = {422--426}, abstract = {this paper trade-offs among certain computational factors in hash coding are analyzed. The paradigm problem considered is that of testing a series of messages one-by-one for membership in a given set of messages. Two new hash- coding methods are examined and compared with a particular conventional hash-coding method. The computational factors considered are the size of the hash area (space), the time required to identify a message as a nonmember of the given set (reject time), and an allowable error frequency}, www_section = {Bloom filter, compression}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.2080\&rep=rep1\&type=pdf}, }
@incollection{Bogetoft:2009:SMC:1601990.1602018, title = {Secure Multiparty Computation Goes Live}, author = {Bogetoft, Peter and Christensen, Dan Lund and Damg{\'a}rd, Ivan and Geisler, Martin and Jakobsen, Thomas and Kr{\o}igaard, Mikkel and Nielsen, Janus Dam and Nielsen, Jesper Buus and Nielsen, Kurt and Pagter, Jakob and Schwartzbach, Michael and Toft, Tomas}, booktitle = {Financial Cryptography and Data Security}, volume = {5628}, year = {2009}, address = {Berlin, Heidelberg}, edition = {1st}, pages = {325--343}, editor = {Roger Dingledine and Philippe Golle}, publisher = {Springer-Verlag}, series = {Lecture Notes in Computer Science}, abstract = {This book constitutes the thoroughly refereed post-conference proceedings of the 14th International Conference on Financial Cryptography and Data Security, FC 2010, held in Tenerife, Canary Islands, Spain in January 2010. The 19 revised full papers and 15 revised short papers presented together with 1 panel report and 7 poster papers were carefully reviewed and selected from 130 submissions. The papers cover all aspects of securing transactions and systems and feature current research focusing on both fundamental and applied real-world deployments on all aspects surrounding commerce security}, www_section = {anonymous credentials, bilinear gruop, privacy, secret sharing, SMC, symbolic evaluation}, isbn = {978-3-642-03548-7}, doi = {10.1007/978-3-642-03549-4_20}, url = {http://dx.doi.org/10.1007/978-3-642-03549-4_20}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Bogetoft\%20et\%20al.\%20-\%20Secure\%20multiparty\%20computation\%20goes\%20live.pdf}, }
@inproceedings{Boldyreva:2008:IEE:1455770.1455823, title = {Identity-based encryption with efficient revocation}, author = {Boldyreva, Alexandra and Goyal, Vipul and Kumar, Virendra}, booktitle = {CCS'08--Proceedings of the 15th ACM Conference on Computer and Communications Security}, organization = {ACM}, year = {2008}, month = oct, address = {Alexandria, VA, USA}, pages = {417--426}, publisher = {ACM}, series = {CCS '08}, abstract = {Identity-based encryption (IBE) is an exciting alternative to public-key encryption, as IBE eliminates the need for a Public Key Infrastructure (PKI). The senders using an IBE do not need to look up the public keys and the corresponding certificates of the receivers, the identities (e.g. emails or IP addresses) of the latter are sufficient to encrypt. Any setting, PKI- or identity-based, must provide a means to revoke users from the system. Efficient revocation is a well-studied problem in the traditional PKI setting. However in the setting of IBE, there has been little work on studying the revocation mechanisms. The most practical solution requires the senders to also use time periods when encrypting, and all the receivers (regardless of whether their keys have been compromised or not) to update their private keys regularly by contacting the trusted authority. We note that this solution does not scale well -- as the number of users increases, the work on key updates becomes a bottleneck. We propose an IBE scheme that significantly improves key-update efficiency on the side of the trusted party (from linear to logarithmic in the number of users), while staying efficient for the users. 
Our scheme builds on the ideas of the Fuzzy IBE primitive and binary tree data structure, and is provably secure}, www_section = {IBE, identity-based encryption, provable security, revocation}, isbn = {978-1-59593-810-7}, doi = {10.1145/1455770.1455823}, url = {http://doi.acm.org/10.1145/1455770.1455823}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20Identity-based\%20encryption\%20with\%20efficient\%20revocation.pdf}, }
@inproceedings{BonehGolle:psp2002, title = {Almost Entirely Correct Mixing With Application to Voting}, author = {Dan Boneh and Philippe Golle}, booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security (CCS 2002)}, organization = {ACM New York, NY, USA}, year = {2002}, month = nov, address = {Washington, DC}, pages = {68--77}, editor = {Vijay Atluri}, publisher = {ACM New York, NY, USA}, abstract = {In order to design an exceptionally efficient mix network, both asymptotically and in real terms, we develop the notion of almost entirely correct mixing, and propose a new mix network that is almost entirely correct. In our new mix, the real cost of proving correctness is orders of magnitude faster than all other mix nets. The trade-off is that our mix only guarantees "almost entirely correct" mixing, i.e it guarantees that the mix network processed correctly all inputs with high (but not overwhelming) probability. We use a new technique for verifying correctness. This new technique consists of computing the product of a random subset of the inputs to a mix server, then require the mix server to produce a subset of the outputs of equal product. Our new mix net is of particular value for electronic voting, where a guarantee of almost entirely correct mixing may well be sufficient to announce instantly the result of a large election. The correctness of the result can later be verified beyond a doubt using any one of a number of much slower proofs of perfect-correctness, without having to mix the ballots again}, www_section = {electronic voting}, isbn = {1-58113-612-9}, doi = {10.1145/586110.586121}, url = {http://portal.acm.org/citation.cfm?id=586121}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BonehGolle-psp2002.pdf}, }
@techreport{Borisov:CSD-05-1390, title = {Anonymity in Structured Peer-to-Peer Networks}, author = {Borisov, Nikita and Waddle, Jason}, institution = {EECS Department, University of California, Berkeley}, number = {UCB/CSD-05-1390}, year = {2005}, month = may, abstract = {Existing peer-to-peer systems that aim to provide anonymity to its users are based on networks with unstructured or loosely-structured routing algorithms. Structured routing offers performance and robustness guarantees that these systems are unable to achieve. We therefore investigate adding anonymity support to structured peer-to-peer networks. We apply an entropy-based anonymity metric to Chord and use this metric to quantify the improvements in anonymity afforded by several possible extensions. We identify particular properties of Chord that have the strongest effect on anonymity and propose a routing extension that allows a general trade-off between anonymity and performance. Our results should be applicable to other structured peer-to-peer systems}, url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2005/6509.html}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-05-1390.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Boulkenafed02adhocfs:sharing, title = {AdHocFS: Sharing Files in WLANs}, author = {Malika Boulkenafed and Valerie Issarny}, year = {2002}, abstract = {This paper presents the ADHOCFS file system for mobile users, which realizes transparent, adaptive file access according to the users' specific situations (e.g., device in use, network connectivity, etc.). The paper concentrates more specifically on the support of ADHOCFS for collaborative file sharing within ad hoc groups of trusted nodes that are in the local communication of each other using the underlying ad hoc network, which has not been addressed in the past}, www_section = {ad-hoc networks}, isbn = {0-7695-1938-5}, url = {http://portal.acm.org/citation.cfm?id=825345}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9956.pdf}, }
@book{Broadening2013Chatzikokolakis, title = {Broadening the Scope of Differential Privacy Using Metrics}, author = {Chatzikokolakis, Konstantinos and Andr{\'e}s, Miguel E. and Bordenabe, Nicol{\'a}s Emilio and Palamidessi, Catuscia}, booktitle = {Privacy Enhancing Technologies}, organization = {Springer Berlin Heidelberg}, volume = {7981}, year = {2013}, pages = {82--102}, editor = {De Cristofaro, Emiliano and Wright, Matthew}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {Differential Privacy is one of the most prominent frameworks used to deal with disclosure prevention in statistical databases. It provides a formal privacy guarantee, ensuring that sensitive information relative to individuals cannot be easily inferred by disclosing answers to aggregate queries. If two databases are adjacent, i.e. differ only for an individual, then the query should not allow to tell them apart by more than a certain factor. This induces a bound also on the distinguishability of two generic databases, which is determined by their distance on the Hamming graph of the adjacency relation. In this paper we explore the implications of differential privacy when the indistinguishability requirement depends on an arbitrary notion of distance. We show that we can naturally express, in this way, (protection against) privacy threats that cannot be represented with the standard notion, leading to new applications of the differential privacy framework. We give intuitive characterizations of these threats in terms of Bayesian adversaries, which generalize two interpretations of (standard) differential privacy from the literature. We revisit the well-known results stating that universally optimal mechanisms exist only for counting queries: We show that, in our extended setting, universally optimal mechanisms exist for other queries too, notably sum, average, and percentile queries. 
We explore various applications of the generalized definition, for statistical databases as well as for other areas, such that geolocation and smart metering}, isbn = {978-3-642-39076-0}, doi = {10.1007/978-3-642-39077-7_5}, url = {http://dx.doi.org/10.1007/978-3-642-39077-7_5}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brodening2013Chatzikokolakis.pdf}, www_section = {Unsorted}, }
@inproceedings{Buchegger03theeffect, title = {The Effect of Rumor Spreading in Reputation Systems for Mobile Ad-Hoc Networks}, author = {Sonja Buchegger and Jean-Yves Le Boudec}, booktitle = {Proceedings of WiOpt '03: Modeling and Optimization in Mobile, Ad Hoc and Wireless Networks, Sophia-Antipolis}, year = {2003}, abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and forwarding. For individual nodes there are however several advantages resulting from noncooperation, the most obvious being power saving. Nodes that act selfishly or even maliciously pose a threat to availability in mobile ad-hoc networks. Several approaches have been proposed to detect noncooperative nodes. In this paper, we investigate the effect of using rumors with respect to the detection time of misbehaved nodes as well as the robustness of the reputation system against wrong accusations. We propose a Bayesian approach for reputation representation, updates, and view integration. We also present a mechanism to detect and exclude potential lies. The simulation results indicate that by using this Bayesian approach, the reputation system is robust against slander while still benefitting from the speed-up in detection time provided by the use of rumors}, www_section = {ad-hoc networks, reputation, robustness}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.9006}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9006_0.pdf}, }
@inproceedings{Burkhart:2010:SPA:1929820.1929840, title = {SEPIA: privacy-preserving aggregation of multi-domain network events and statistics}, author = {Burkhart, Martin and Strasser, Mario and Many, Dilip and Dimitropoulos, Xenofontas}, booktitle = {Proceedings of the 19th USENIX conference on Security}, organization = {USENIX Association}, year = {2010}, month = aug, address = {Washington, DC, USA}, pages = {15--15}, publisher = {USENIX Association}, series = {USENIX Security'10}, abstract = {Secure multiparty computation (MPC) allows joint privacy-preserving computations on data of multiple parties. Although MPC has been studied substantially, building solutions that are practical in terms of computation and communication cost is still a major challenge. In this paper, we investigate the practical usefulness of MPC for multi-domain network security and monitoring. We first optimize MPC comparison operations for processing high volume data in near real-time. We then design privacy-preserving protocols for event correlation and aggregation of network traffic statistics, such as addition of volume metrics, computation of feature entropy, and distinct item count. Optimizing performance of parallel invocations, we implement our protocols along with a complete set of basic operations in a library called SEPIA. We evaluate the running time and bandwidth requirements of our protocols in realistic settings on a local cluster as well as on PlanetLab and show that they work in near real-time for up to 140 input providers and 9 computation nodes. Compared to implementations using existing general-purpose MPC frameworks, our protocols are significantly faster, requiring, for example, 3 minutes for a task that takes 2 days with general-purpose frameworks. This improvement paves the way for new applications of MPC in the area of networking. 
Finally, we run SEPIA's protocols on real traffic traces of 17 networks and show how they provide new possibilities for distributed troubleshooting and early anomaly detection}, www_section = {privacy, secure multi-party computation, SMC}, url = {http://dl.acm.org/citation.cfm?id=1929820.1929840}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/USENIX\%20Security\%2710\%20-\%20SEPIA.pdf}, }
@inproceedings{Bustamante04wayback:a, title = {Wayback: A User-level Versioning File System for Linux}, author = {Brian Cornell and Peter Dinda and Fabian Bustamante}, booktitle = {Proceedings of USENIX 2004 (Freenix Track)}, year = {2004}, abstract = {In a typical file system, only the current version of a file (or directory) is available. In Wayback, a user can also access any previous version, all the way back to the file's creation time. Versioning is done automatically at the write level: each write to the file creates a new version. Wayback implements versioning using an undo log structure, exploiting the massive space available on modern disks to provide its very useful functionality. Wayback is a user-level file system built on the FUSE framework that relies on an underlying file system for access to the disk. In addition to simplifying Wayback, this also allows it to extend any existing file system with versioning: after being mounted, the file system can be mounted a second time with versioning. We describe the implementation of Wayback, and evaluate its performance using several benchmarks}, www_section = {file systems, version control}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.2672}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.11.2672.pdf}, }
% Conference paper: power-of-two-choices load balancing for DHTs.
% Fixed: booktitle merely duplicated the title; set to the IPTPS 2003 proceedings volume that matches doi 10.1007/b11823 (LNCS 2735, pp. 80--87).
% NOTE(review): year = {2002} is likely the technical-report date; the workshop version appeared in 2003 -- verify before changing.
@conference{Byers02simpleload, title = {Simple Load Balancing for Distributed Hash Tables}, author = {Byers, John W. and Jeffrey Considine and Michael Mitzenmacher}, booktitle = {Peer-to-Peer Systems II: Second International Workshop, IPTPS 2003}, year = {2002}, pages = {80--87}, abstract = {Distributed hash tables have recently become a useful building block for a variety of distributed applications. However, current schemes based upon consistent hashing require both considerable implementation complexity and substantial storage overhead to achieve desired load balancing goals. We argue in this paper that these goals can be achieved more simply and more cost-effectively. First, we suggest the direct application of the power of two choices paradigm, whereby an item is stored at the less loaded of two (or more) random alternatives. We then consider how associating a small constant number of hash values with a key can naturally be extended to support other load balancing strategies, including load-stealing or load-shedding, as well as providing natural fault-tolerance mechanisms}, www_section = {distributed hash table, load balancing}, doi = {10.1007/b11823}, url = {http://www.springerlink.com/content/r9r4qcqxc2bmfqmr/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.277.pdf}, }
% Conference paper (Med-Hoc-Net 2014): CADET, confidential ad-hoc decentralized end-to-end transport for GNUnet.
% NOTE(review): month = {January} is doubtful -- Med-Hoc-Net 2014 took place in June 2014; verify.
% NOTE(review): url is a placeholder pointing at bibliography.gnunet.org rather than the paper itself.
@conference{CADET, title = {CADET: Confidential Ad-hoc Decentralized End-to-End Transport}, author = {Polot, Bartlomiej and Christian Grothoff}, booktitle = {Med-Hoc-Net 2014}, year = {2014}, month = {January}, abstract = {This paper describes CADET, a new transport protocol for confidential and authenticated data transfer in decentralized networks. This transport protocol is designed to operate in restricted-route scenarios such as friend-to-friend or ad-hoc wireless networks. We have implemented CADET and evaluated its performance in various network scenarios, compared it to the well-known TCP/IP stack and tested its response to rapidly changing network topologies. While our current implementation is still significantly slower in high-speed low-latency networks, for typical Internet-usage our system provides much better connectivity and security with comparable performance to TCP/IP}, www_section = {CADET, encryption, GNUnet, routing}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cadet.pdf}, www_tags = {selected}, url = {https://bibliography.gnunet.org}, }
% Conference paper (CANS 2014, Springer): the GNU Name System -- censorship-resistant, privacy-enhancing, fully decentralized naming.
% NOTE(review): entry lacks month, address and pages -- consider completing from the CANS 2014 (LNCS) proceedings.
@conference{CANS2014camera-ready, title = {A Censorship-Resistant, Privacy-Enhancing and Fully Decentralized Name System}, author = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff}, booktitle = {International Conference on Cryptology and Network Security (CANS)}, organization = {Springer Verlag}, year = {2014}, publisher = {Springer Verlag}, abstract = {The Domain Name System (DNS) is vital for access to information on the Internet. This makes it a target for attackers whose aim is to suppress free access to information. This paper introduces the design and implementation of the GNU Name System (GNS), a fully decentralized and censorship-resistant name system. GNS provides a privacy-enhancing alternative to DNS which preserves the desirable property of memorable names. Due to its design, it can also double as a partial replacement of public key infrastructures, such as X.509. The design of GNS incorporates the capability to integrate and coexist with DNS. GNS is based on the principle of a petname system and builds on ideas from the Simple Distributed Security Infrastructure (SDSI), addressing a central issue with the decentralized mapping of secure identifiers to memorable names: namely the impossibility of providing a global, secure and memorable mapping without a trusted authority. GNS uses the transitivity in the SDSI design to replace the trusted root with secure delegation of authority, thus making petnames useful to other users while operating under a very strong adversary model. In addition to describing the GNS design, we also discuss some of the mechanisms that are needed to smoothly integrate GNS with existing processes and procedures in Web browsers. 
Specifically, we show how GNS is able to transparently support many assumptions that the existing HTTP(S) infrastructure makes about globally unique names}, www_section = {DNS, GNU Name System, GNUnet, PKI}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper_cans2014_camera_ready.pdf}, www_tags = {selected}, url = {https://bibliography.gnunet.org}, }
% Conference paper (STOC '97): computationally private information retrieval (Chor--Gilboa).
% Fixed: doi stored bare (resolver prefix dropped, matching this file's other entries); reconstructed the
% OCR-garbled complexity formulas in the abstract ("O(n 1=3 )" etc. were extraction damage).
% NOTE(review): the trailing affiliation text glued to the abstract ("Computer Science Dept., Technion...") is exporter junk -- left in place.
@conference{CPIR, title = {Computationally private information retrieval (extended abstract)}, author = {Benny Chor and Niv Gilboa}, booktitle = {Proceedings of the twenty-ninth annual ACM symposium on Theory of Computing (STOC '97)}, organization = {ACM Press}, year = {1997}, address = {El Paso, TX, United States}, pages = {304--313}, publisher = {ACM Press}, abstract = {Private information retrieval (PIR) schemes enable a user to access k replicated copies of a database ($k \ge 2$), and privately retrieve one of the n bits of data stored in the databases. This means that the queries give each individual database no partial information (in the information theoretic sense) on the identity of the item retrieved by the user. Today, the best two database scheme (k = 2) has communication complexity $O(n^{1/3})$, while for any constant number, k, the best k database scheme has communication complexity $O(n^{1/(2k-1)})$. The motivation for the present work is the question whether this complexity can be reduced if one is willing to achieve computational privacy, rather than information theoretic privacy. (This means that privacy is guaranteed only with respect to databases that are restricted to polynomial time computations.) We answer this question affirmatively, and Computer Science Dept., Technion, Haifa, Israel}, www_section = {communication complexity, private information retrieval}, isbn = {0-89791-888-6}, doi = {10.1145/258533.258609}, url = {http://portal.acm.org/citation.cfm?id=258533.258609}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chor97computationally.pdf}, }
% Conference paper (HotOS VIII): Herald, a scalable global event notification service (Microsoft Research).
% NOTE(review): booktitle carries the exporter's stray "In" prefix, consistent with other entries in this file;
% www_section is missing (already flagged inline by the generator).
@conference{Cabrera01herald:achieving, title = {Herald: Achieving a Global Event Notification Service}, author = {Luis Felipe Cabrera and Michael B. Jones and Marvin Theimer}, booktitle = {In HotOS VIII}, organization = {IEEE Computer Society}, year = {2001}, publisher = {IEEE Computer Society}, abstract = {This paper presents the design philosophy and initial design decisions of Herald: a highly scalable global event notification system that is being designed and built at Microsoft Research. Herald is a distributed system designed to transparently scale in all respects, including numbers of subscribers and publishers, numbers of event subscription points, and event delivery rates. Event delivery can occur within a single machine, within a local network or Intranet, and throughout the Internet}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.672\&rep=rep1\&type=pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Journal article: game-theoretic analysis of Internet pricing (leader-follower, cooperative, two-person nonzero sum games).
% Fixed: journal name expanded from the truncated "IEEE/ACM Trans. Netw" to its full form, consistent with this
% file's use of full journal names elsewhere.
@article{Cao:2002:IPG:508325.508330, title = {Internet pricing with a game theoretical approach: concepts and examples}, author = {Cao, Xi-Ren and Shen, Hong-Xia and Milito, Rodolfo and Wirth, Patrica}, journal = {IEEE/ACM Transactions on Networking}, volume = {10}, year = {2002}, month = {April}, address = {Piscataway, NJ, USA}, pages = {208--216}, publisher = {IEEE Press}, abstract = {The basic concepts of three branches of game theory, leader-follower, cooperative, and two-person nonzero sum games, are reviewed and applied to the study of the Internet pricing issue. In particular, we emphasize that the cooperative game (also called the bargaining problem) provides an overall picture for the issue. With a simple model for Internet quality of service (QoS), we demonstrate that the leader-follower game may lead to a solution that is not Pareto optimal and in some cases may be "unfair," and that the cooperative game may provide a better solution for both the Internet service provider (ISP) and the user. The practical implication of the results is that government regulation or arbitration may be helpful. The QoS model is also applied to study the competition between two ISPs, and we find a Nash equilibrium point from which the two ISPs would not move out without cooperation. The proposed approaches can be applied to other Internet pricing problems such as the Paris Metro pricing scheme}, www_section = {bargaining problems, cooperative games, leader-follower games, Paris metro pricing, quality of services, two-person nonzero sum games}, issn = {1063-6692}, url = {http://dl.acm.org/citation.cfm?id=508325.508330}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Trans.\%20Netw.\%2702\%20\%2810\%29-\%20Internet\%20pricing.pdf}, }
% Conference paper (ACM NSPW 2002): small-world analysis of the PGP certificate graph for self-organized security.
% NOTE(review): "Srdan Capkun" lacks diacritics (cf. Butty{\'a}n in the same field) -- presumably "Srdjan {\v{C}}apkun"; confirm before changing.
@conference{Capkun02smallworlds, title = {Small Worlds in Security Systems: an Analysis of the PGP Certificate Graph}, author = {Srdan Capkun and Levente Butty{\'a}n and Jean-Pierre Hubaux}, booktitle = {In Proceedings of The ACM New Security Paradigms Workshop}, organization = {ACM Press}, year = {2002}, pages = {28--35}, publisher = {ACM Press}, abstract = {We propose a new approach to securing self-organized mobile ad hoc networks. In this approach, security is achieved in a fully self-organized manner; by this we mean that the security system does not require any kind of certification authority or centralized server, even for the initialization phase. In our work, we were inspired by PGP [15] because its operation relies solely on the acquaintances between users. We show that the small-world phenomenon naturally emerges in the PGP system as a consequence of the self-organization of users. We show this by studying the PGP certificate graph properties and by quantifying its small-world characteristics. We argue that the certificate graphs of self-organized security systems will exhibit a similar small-world phenomenon, and we provide a way to model self-organized certificate graphs. The results of the PGP certificate graph analysis and graph modelling can be used to build new self-organized security systems and to test the performance of the existing proposals. In this work, we refer to such an example}, www_section = {PGP, public key management, self-organization, small-world}, isbn = {1-58113-598-X}, doi = {10.1145/844102.844108}, url = {http://portal.acm.org/citation.cfm?id=844102.844108}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.5408.pdf}, }
% Booklet: overview of network-proximity exploitation in Pastry's routing tables.
% NOTE(review): url is a legacy .ps link on research.microsoft.com -- likely dead; the mirrored www_pdf_url
% should be preferred, or add an archival link. www_section is missing (flagged inline by the generator).
@booklet{Castro02exploitingnetwork, title = {Exploiting network proximity in peer-to-peer overlay networks}, author = {Miguel Castro and Peter Druschel and Y. Charlie Hu and Antony Rowstron}, year = {2002}, abstract = {The authors give an overview over various ways to use proximity information to optimize routing in peer-to-peer networks. Their study focuses on Pastry and describe in detail the protocols that are used in Pastry to build routing tables with neighbours that are close in terms of the underlying network. They give some analytical and extensive experimental evidence that the protocols are effective in reducing the length of the routing-path in terms of the link-to-link latency that their implementation uses to measure distance}, url = {http://www.research.microsoft.com/~antr/PAST/location.ps}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/location.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Workshop paper (FuDiCo 2002): comparison of proximity techniques (geographic layout, proximity routing,
% proximity neighbour selection) in DHTs.
% Fixed: capitalized the stray lowercase "in" prefix in booktitle to match the exporter style used elsewhere in this file.
@conference{Castro02exploitingnetwork_0, title = {Exploiting network proximity in distributed hash tables}, author = {Miguel Castro and Peter Druschel and Y. Charlie Hu}, booktitle = {In International Workshop on Future Directions in Distributed Computing (FuDiCo)}, year = {2002}, pages = {52--55}, abstract = {Self-organizing peer-to-peer (p2p) overlay networks like CAN, Chord, Pastry and Tapestry (also called distributed hash tables or DHTs) offer a novel platform for a variety of scalable and decentralized distributed applications. These systems provide efficient and fault-tolerant routing, object location, and load balancing within a self-organizing overlay network. One important aspect of these systems is how they exploit network proximity in the underlying Internet. Three basic approaches have been proposed to exploit network proximity in DHTs, geographic layout, proximity routing and proximity neighbour selection. In this position paper, we briefly discuss the three approaches, contrast their strengths and shortcomings, and consider their applicability in the different DHT routing protocols. We conclude that proximity neighbor selection, when used in DHTs with prefixbased routing like Pastry and Tapestry, is highly effective and appears to dominate the other approaches}, www_section = {CAN, distributed hash table, P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.126.3062}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fudico.pdf}, }
% Journal article: Scribe, application-level multicast built on Pastry.
% Fixed: pages = {0--2002} was corrupt export residue; set to 1489--1499 and added number = {8} per
% IEEE JSAC vol. 20, no. 8 (October 2002) -- TODO verify against the published issue.
@article{Castro02scribe:a, title = {SCRIBE: A large-scale and decentralized application-level multicast infrastructure}, author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron}, journal = {IEEE Journal on Selected Areas in Communications (JSAC)}, volume = {20}, number = {8}, year = {2002}, pages = {1489--1499}, abstract = {This paper presents Scribe, a scalable application-level multicast infrastructure. Scribe supports large numbers of groups, with a potentially large number of members per group. Scribe is built on top of Pastry, a generic peer-to-peer object location and routing substrate overlayed on the Internet, and leverages Pastry's reliability, self-organization, and locality properties. Pastry is used to create and manage groups and to build efficient multicast trees for the dissemination of messages to each group. Scribe provides best-effort reliability guarantees, but we outline how an application can extend Scribe to provide stronger reliability. Simulation results, based on a realistic network topology model, show that Scribe scales across a wide range of groups and group sizes. Also, it balances the load on the nodes while achieving acceptable delay and link stress when compared to IP multicast}, www_section = {distributed hash table, multicast, Scribe}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.299\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac.pdf}, }
% Journal article (SIGOPS OSR 37): SplitStream, high-bandwidth multicast via interior-node-disjoint tree forests.
% Fixed: doi stored bare (resolver prefix dropped, matching this file's other entries); the resolver form remains in url.
@article{Castro:2003:SHM:1165389.945474, title = {SplitStream: high-bandwidth multicast in cooperative environments}, author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Nandi, Animesh and Antony Rowstron and Singh, Atul}, journal = {SIGOPS'03 Operating Systems Review}, volume = {37}, year = {2003}, month = {October}, address = {New York, NY, USA}, pages = {298--313}, publisher = {ACM}, abstract = {In tree-based multicast systems, a relatively small number of interior nodes carry the load of forwarding multicast messages. This works well when the interior nodes are highly-available, dedicated infrastructure routers but it poses a problem for application-level multicast in peer-to-peer systems. SplitStream addresses this problem by striping the content across a forest of interior-node-disjoint multicast trees that distributes the forwarding load among all participating peers. For example, it is possible to construct efficient SplitStream forests in which each peer contributes only as much forwarding bandwidth as it receives. Furthermore, with appropriate content encodings, SplitStream is highly robust to failures because a node failure causes the loss of a single stripe on average. We present the design and implementation of SplitStream and show experimental results obtained on an Internet testbed and via large-scale network simulation. The results show that SplitStream distributes the forwarding load among all peers and can accommodate peers with different bandwidth capacities while imposing low overhead for forest construction and maintenance}, www_section = {application-level multicast, content distribution, end-system multicast, peer-to-peer networking, video streaming}, issn = {0163-5980}, doi = {10.1145/1165389.945474}, url = {http://doi.acm.org/10.1145/1165389.945474}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOSP\%2703\%20-\%20Spitstream\%3A\%20High-bandwidth\%20multicast.pdf}, }
% Conference paper (FREENIX '01): TCFS, a transparent cryptographic file system for UNIX/NFS.
% Fixed: restored "fi"/"ffi" ligatures and bracketed citation markers lost in PDF extraction of the abstract
% (e.g. "le system" -> "file system", "7, 5]" -> "[7, 5]"), plus the "temporarely" typo.
% NOTE(review): www_section value "crytographic file system" is misspelled, but it may be a live site taxonomy
% key -- rename it site-wide, not in a single entry.
@conference{Cattaneo:2001:DIT:647054.715628, title = {The Design and Implementation of a Transparent Cryptographic File System for UNIX}, author = {Cattaneo, Giuseppe and Catuogno, Luigi and Sorbo, Aniello Del and Persiano, Pino}, booktitle = {Proceedings of the FREENIX Track: 2001 USENIX Annual Technical Conference}, organization = {USENIX Association}, year = {2001}, month = {June}, address = {Boston, Massachusetts, USA}, pages = {199--212}, publisher = {USENIX Association}, abstract = {Recent advances in hardware and communication technologies have made possible and cost effective to share a file system among several machines over a local (but possibly also a wide) area network. One of the most successful and widely used such applications is Sun's Network File System (NFS). NFS is very simple in structure but assumes a very strong trust model: the user trusts the remote file system server (which might be running on a machine in a different country) and a network with his/her data. It is easy to see that neither assumption is a very realistic one. The server (or anybody with superuser privileges) might very well read the data on its local filesystem and it is well known that the Internet or any local area network (e.g, Ethernet) is very easy to tap (see for example, Berkeley's tcpdump [7, 5] application program). Impersonification of users is also another security drawback of NFS. In fact, most of the permission checking over NFS are performed in the kernel of the client. In such a context a pirate can temporarily assign to his own workstation the Internet address of victim. Without secure RPC [9] no further authentication procedure is requested. From here on, the pirate can issue NFS requests presenting himself with any (false) uid and therefore accessing for reading and writing any private data on the server, even protected data. Given the above, a user seeking a certain level of security should take some measures. 
Possible solutions are to use either user-level cryptography or application level cryptography. A discussion of the drawbacks of these approaches is found in [4]. A better approach is to push encryption services into the operating system as done by M. Blaze in the design of his CFS [4]. In this paper, we propose a new cryptographic file system, which we call TCFS, as a suitable solution to the problem of privacy for distributed file system (see section 2.1). Our work improves on CFS by providing a deeper integration between the encryption service and the file system which results in a complete transparency of use to the user applications}, www_section = {crytographic file system, UNIX}, isbn = {1-880446-10-3}, url = {http://dl.acm.org/citation.cfm?id=647054.715628}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FREENIX\%2701\%20-\%20A\%20transparent\%20cryptographic\%20file\%20system\%20for\%20UNIX.pdf}, }
% Conference paper (IEEE CSF 2007): probability of error in information-hiding protocols (Bayesian/MAP analysis,
% improved Hellman-Raviv and Santhi-Vardy bounds, Crowds application).
@conference{ChatziPP07, title = {Probability of Error in Information-Hiding Protocols}, author = {Konstantinos Chatzikokolakis and Catuscia Palamidessi and Prakash Panangaden}, booktitle = {Proceedings of the 20th IEEE Computer Security Foundations Symposium (CSF20)}, year = {2007}, abstract = {Randomized protocols for hiding private information can fruitfully be regarded as noisy channels in the information-theoretic sense, and the inference of the concealed information can be regarded as a hypothesis-testing problem. We consider the Bayesian approach to the problem, and investigate the probability of error associated to the inference when the MAP (Maximum Aposteriori Probability) decision rule is adopted. Our main result is a constructive characterization of a convex base of the probability of error, which allows us to compute its maximum value (over all possible input distributions), and to identify upper bounds for it in terms of simple functions. As a side result, we are able to improve substantially the Hellman-Raviv and the Santhi-Vardy bounds expressed in terms of conditional entropy. We then discuss an application of our methodology to the Crowds protocol, and in particular we show how to compute the bounds on the probability that an adversary breaks anonymity}, www_section = {anonymity, privacy}, isbn = {0-7695-2819-8}, doi = {10.1109/CSF.2007.27}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.79.2620}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChatziPP07.pdf}, }
% Booklet: Poblano, a distributed trust model for peer-to-peer networks.
% NOTE(review): presumably a Sun Microsystems (JXTA) technical report -- @techreport with an institution field
% may fit better; confirm the publication venue. www_section is missing (flagged inline by the generator).
@booklet{Chen01poblano:a, title = {Poblano: A distributed trust model for peer-to-peer networks}, author = {Rita Chen and William Yeager}, year = {2001}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.7489\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.7489.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Journal article (Journal of Supercomputing 43): replication vs. erasure-coding redundancy in DHTs, plus a
% hybrid scheme driven by user download behavior.
@article{Chen:2008:IRS:1331483.1331515, title = {Insight into redundancy schemes in DHTs}, author = {Chen, Guihai and Qiu, Tongqing and Wu, Fan}, journal = {Journal of Supercomputing}, volume = {43}, year = {2008}, month = {February}, address = {Hingham, MA, USA}, pages = {183--198}, publisher = {Kluwer Academic Publishers}, abstract = {In order to provide high data availability in peer-to-peer (P2P) DHTs, proper data redundancy schemes are required. This paper compares two popular schemes: replication and erasure coding. Unlike previous comparison, we take user download behavior into account. Furthermore, we propose a hybrid redundancy scheme, which shares user downloaded files for subsequent accesses and utilizes erasure coding to adjust file availability. Comparison experiments of three schemes show that replication saves more bandwidth than erasure coding, although it requires more storage space, when average node availability is higher than 47\%; moreover, our hybrid scheme saves more maintenance bandwidth with acceptable redundancy factor}, www_section = {distributed hash table, erasure coding, peer-to-peer networking, redundancy, Replication}, issn = {0920-8542}, doi = {10.1007/s11227-007-0126-4}, url = {http://dl.acm.org/citation.cfm?id=1331483.1331515}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Supercomputing\%20-\%20Insight\%20into\%20redundancy\%20schemes\%20in\%20DHTs.pdf}, }
% Conference paper (P2PECON '05): sybilproof reputation mechanisms -- impossibility for symmetric functions,
% flow-based asymmetric construction.
% Fixed: doi stored bare (resolver prefix dropped, matching this file's other entries); the resolver form remains in url.
@conference{Cheng:2005:SRM:1080192.1080202, title = {Sybilproof reputation mechanisms}, author = {Cheng, Alice and Eric Friedman}, booktitle = {Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems}, organization = {ACM}, year = {2005}, month = {August}, address = {Philadelphia, PA}, pages = {128--132}, publisher = {ACM}, series = {P2PECON '05}, abstract = {Due to the open, anonymous nature of many P2P networks, new identities--or sybils--may be created cheaply and in large numbers. Given a reputation system, a peer may attempt to falsely raise its reputation by creating fake links between its sybils. Many existing reputation mechanisms are not resistant to these types of strategies.Using a static graph formulation of reputation, we attempt to formalize the notion of sybilproofness. We show that there is no symmetric sybilproof reputation function. For nonsymmetric reputations, following the notion of reputation propagation along paths, we give a general asymmetric reputation function based on flow and give conditions for sybilproofness}, www_section = {peer-to-peer networking, reputation, Sybil attack}, isbn = {1-59593-026-4}, doi = {10.1145/1080192.1080202}, url = {http://doi.acm.org/10.1145/1080192.1080202}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20SIGCOMM\%2705\%20-\%20Cheng\%20\%26\%20Friedman\%20-\%20Sybilproof\%20reputation\%20mechanisms.pdf}, }
% Conference paper (AIMS '09, LNCS 5637): evaluation of Sybil-attack protection mechanisms in the KAD DHT.
% Fixed: doi stored bare (resolver prefix dropped, matching this file's other entries); the resolver form remains in url.
@conference{Cholez:2009:ESA:1574663.1574671, title = {Evaluation of Sybil Attacks Protection Schemes in KAD}, author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}, booktitle = {AIMS'09--Proceedings of the 3rd International Conference on Autonomous Infrastructure, Management and Security: Scalability of Networks and Services}, organization = {Springer-Verlag}, volume = {5637}, year = {2009}, month = {June}, address = {Enschede, The Netherlands}, pages = {70--82}, publisher = {Springer-Verlag}, series = {Lecture Notes in Computer Science}, abstract = {In this paper, we assess the protection mechanisms entered into recent clients to fight against the Sybil attack in KAD, a widely deployed Distributed Hash Table. We study three main mechanisms: a protection against flooding through packet tracking, an IP address limitation and a verification of identities. We evaluate their efficiency by designing and adapting an attack for several KAD clients with different levels of protection. Our results show that the new security rules mitigate the Sybil attacks previously launched. However, we prove that it is still possible to control a small part of the network despite the new inserted defenses with a distributed eclipse attack and limited resources}, www_section = {defense, distributed hash table, KAD, p2p network, security, Sybil attack}, isbn = {978-3-642-02626-3}, doi = {10.1007/978-3-642-02627-0_6}, url = {http://dx.doi.org/10.1007/978-3-642-02627-0_6}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AIMS\%2709\%20-\%20Sybil\%20attacks\%20protection\%20schemes\%20in\%20KAD.pdf}, }
% Conference paper: Freenet, distributed anonymous information storage and retrieval.
% Fixed: booktitle merely duplicated the title; set to the actual venue (International Workshop on Design Issues
% in Anonymity and Unobservability, Berkeley 2000) as recorded in the sibling entry for the same work. Also
% repaired the extraction-garbled "di$\#$cult" -> "difficult".
% NOTE(review): this entry duplicates key Clarke00freenet:a_0 -- consider merging the two.
@conference{Clarke00freenet:a, title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}, booktitle = {Designing Privacy Enhancing Technologies: International Workshop on Design Issues in Anonymity and Unobservability}, year = {2000}, pages = {46--66}, abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and difficult for a node operator to determine or be held responsible for the actual physical contents of her own node}, www_section = {Freenet, P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.4919}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.4919.pdf}, }
% Conference paper: Freenet (duplicate-key variant of Clarke00freenet:a -- consider merging).
% Fixed: booktitle contained garbled separators (", ,,") and a stray trailing "Proceedings 2001" fragment;
% cleaned to the workshop's proceedings title. Also repaired the extraction-garbled "di$\#$cult" -> "difficult".
% NOTE(review): the proceedings volume was published in 2001 while the workshop was held in 2000 (year field) -- verify which the site should display.
@conference{Clarke00freenet:a_0, title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}, booktitle = {Designing Privacy Enhancing Technologies: International Workshop on Design Issues in Anonymity and Unobservability}, year = {2000}, month = {July}, address = {Berkeley, CA, USA}, pages = {46--66}, abstract = {We describe Freenet, an adaptive peer-to-peer network application that permits the publication, replication, and retrieval of data while protecting the anonymity of both authors and readers. Freenet operates as a network of identical nodes that collectively pool their storage space to store data files and cooperate to route requests to the most likely physical location of data. No broadcast search or centralized location index is employed. Files are referred to in a location-independent manner, and are dynamically replicated in locations near requestors and deleted from locations where there is no interest. It is infeasible to discover the true origin or destination of a file passing through the network, and difficult for a node operator to determine or be held responsible for the actual physical contents of her own node}, url = {http://www.ecse.rpi.edu/Homepages/shivkuma/teaching/sp2001/readings/freenet.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freenet.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Journal article (SIGKDD Explorations 4(2)): toolkit components for privacy-preserving distributed data mining.
@article{Clifton:2002:TPP:772862.772867, title = {Tools for privacy preserving distributed data mining}, author = {Clifton, Chris and Kantarcioglu, Murat and Vaidya, Jaideep and Lin, Xiaodong and Zhu, Michael Y.}, journal = {SIGKDD Explorations Newsletter}, volume = {4}, number = {2}, year = {2002}, month = {December}, address = {New York, NY, USA}, pages = {28--34}, publisher = {ACM}, abstract = {Privacy preserving mining of distributed data has numerous applications. Each application poses different constraints: What is meant by privacy, what are the desired results, how is the data distributed, what are the constraints on collaboration and cooperative computing, etc. We suggest that the solution to this is a toolkit of components that can be combined for specific privacy-preserving data mining applications. This paper presents some components of such a toolkit, and shows how they can be used to solve several privacy-preserving data mining problems}, www_section = {PIR, privacy, security}, issn = {1931-0145}, doi = {10.1145/772862.772867}, url = {http://doi.acm.org/10.1145/772862.772867}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGKDD\%20Explor.\%20Newsl.\%20-\%20Distributed\%20data\%20mining.pdf}, }
% Conference paper (ACM CoNEXT 2008): EGOIST, overlay routing via selfish neighbor selection.
% NOTE(review): entry lacks pages and DOI -- consider completing from the CoNEXT 2008 proceedings.
@conference{CoNext2008, title = {EGOIST: Overlay Routing using Selfish Neighbor Selection}, author = {Georgios Smaragdakis and Vassilis Lekakis and Nikolaos Laoutaris and Azer Bestavros and Byers, John W. and Mema Roussopoulos}, booktitle = {Proceedings of ACM CoNEXT 2008}, year = {2008}, month = {December}, address = {Madrid, Spain}, abstract = {A foundational issue underlying many overlay network applications ranging from routing to peer-to-peer file sharing is that of connectivity management, i.e., folding new arrivals into an existing overlay, and re-wiring to cope with changing network conditions. Previous work has considered the problem from two perspectives: devising practical heuristics for specific applications designed to work well in real deployments, and providing abstractions for the underlying problem that are analytically tractable, especially via game-theoretic analysis. In this paper, we unify these two thrusts by using insights gleaned from novel, realistic theoretic models in the design of Egoist -- a distributed overlay routing system that we implemented, deployed, and evaluated on PlanetLab. Using extensive measurements of paths between nodes, we demonstrate that Egoist's neighbor selection primitives significantly outperform existing heuristics on a variety of performance metrics, including delay, available bandwidth, and node utilization. Moreover, we demonstrate that Egoist is competitive with an optimal, but unscalable full-mesh approach, remains highly effective under significant churn, is robust to cheating, and incurs minimal overhead. Finally, we use a multiplayer peer-to-peer game to demonstrate the value of Egoist to end-user applications}, www_section = {EGOIST, game theory, overlay networks, routing, selfish neighbor selection}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoNEXT2008.pdf}, }
% Conference paper (ACM SIGCOMM 2002): proactive replication strategies in unstructured P2P networks.
% Fixed: completed "S Shenker" to "Scott Shenker" (full given names, per file convention); split the malformed
% volume = {Volume 32 , Issue 4} into volume = {32}, number = {4} (the SIGCOMM '02 proceedings double as CCR 32(4)).
@conference{Cohen02replicationstrategies, title = {Replication Strategies in Unstructured Peer-to-Peer Networks}, author = {Edith Cohen and Scott Shenker}, booktitle = {Proceedings of the 2002 SIGCOMM conference}, organization = {ACM New York, NY, USA}, volume = {32}, number = {4}, year = {2002}, month = {October}, address = {Pittsburgh}, pages = {177--190}, publisher = {ACM New York, NY, USA}, abstract = {The Peer-to-Peer (P2P) architectures that are most prevalent in today's Internet are decentralized and unstructured. Search is blind in that it is independent of the query and is thus not more effective than probing randomly chosen peers. One technique to improve the effectiveness of blind search is to proactively replicate data}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9873\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/replication.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Conner:2007:SPM:1377934.1377937, title = {Securing peer-to-peer media streaming systems from selfish and malicious behavior}, author = {Conner, William and Nahrstedt, Klara}, booktitle = {MDS'07. Proceedings of the 4th on Middleware Doctoral Symposium}, organization = {ACM}, volume = {13}, year = {2007}, month = {November}, address = {Newport Beach, CA, USA}, pages = {1--6}, publisher = {ACM}, series = {MDS '07}, abstract = {We present a flexible framework for throttling attackers in peer-to-peer media streaming systems. In such systems, selfish nodes (e.g., free riders) and malicious nodes (e.g., DoS attackers) can overwhelm the system by issuing too many requests in a short interval of time. Since peer-to-peer systems are decentralized, it is difficult for individual peers to limit the aggregate download bandwidth consumed by other remote peers. This could potentially allow selfish and malicious peers to exhaust the system's available upload bandwidth. In this paper, we propose a framework to provide a solution to this problem by utilizing a subset of trusted peers (called kantoku nodes) that collectively monitor the bandwidth usage of untrusted peers in the system and throttle attackers. This framework has been evaluated through simulation thus far. Experiments with a full implementation on a network testbed are part of our future work}, www_section = {accounting, multimedia, peer-to-peer networking, security}, isbn = {978-1-59593-933-3}, doi = {http://doi.acm.org/10.1145/1377934.1377937}, url = {http://doi.acm.org/10.1145/1377934.1377937}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MDS\%2707\%20-\%20Conner\%20\%26\%20Nahrstedt\%20-\%20Securing\%20peer-to-peer\%20media\%20streaming\%20systems.pdf}, }
@conference{Conrad03multiplelanguage, title = {Multiple language family support for programmable network systems}, author = {Michael Conrad and Marcus Schoeller and Thomas Fuhrmann and Gerhard Bocksch and Martina Zitterbart}, booktitle = {Proceedings of the 5th Annual International Working Conference on Active Networks (IWAN)}, year = {2003}, abstract = {Various programmable networks have been designed and implemented during the last couple of years. Many of them are focused on a single programming language only. This limitation might{\textemdash}to a certain extend{\textemdash}hinder the productivity of service modules being programmed for such networks. Therefore, the concurrent support of service modules written in multiple programming languages was investigated within the FlexiNet project. Basically, support for three major programming paradigms was incorporated into FlexiNet: compiled programming languages like C, interpreted languages (e.g., Java), and hardware description languages such as VHDL. The key concept can be seen in an integral interface that is used by all three programming languages. This leads to a configuration scheme which is totally transparent to the programming languages used to develop the service. In order to get a better idea about the impact of the programming language used, some measurement experiments were conducted}, www_section = {flexible service platforms, programmable networks}, isbn = {978-3-540-21250-8}, doi = {10.1007/b96396}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.68.3301}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scholler03language.pdf}, }
@conference{Conrad04SecureServiceSignaling, title = {Secure Service Signaling and fast Authorization in Programmable Networks}, author = {Michael Conrad and Thomas Fuhrmann and Marcus Schoeller and Martina Zitterbart}, booktitle = {Proceedings of the 6th International Working Conference on Active Networking (IWAN) 2004}, organization = {Springer Berlin / Heidelberg}, year = {2004}, address = {Lawrence, Kansas}, publisher = {Springer Berlin / Heidelberg}, type = {publication}, abstract = {Programmable networks aim at the fast and flexible creation of services within a network. Often cited examples are audio and video transcoding, application layer multicast, or mobility and resilience support. In order to become commercially viable, programmable networks must provide authentication, authorization and accounting functionality. The mechanisms used to achieve these functionalities must be secure, reliable, and scalable, to be used in production scale programmable networks. Additionally programmable nodes must resist various kinds of attacks, such as denial of service or replay attacks. Fraudulent use by individual users must also be prohibited. This paper describes the design and implementation of a secure, reliable, and scalable signaling mechanism clients can use to initiate service startup and to manage services running on the nodes of a programmable network. This mechanism is designed for production scale networks with AAA-functionality}, www_section = {programmable networks, secrecy}, isbn = {978-3-540-71499-6}, doi = {10.1007/978-3-540-71500-9}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/iwan2004.pdf}, }
@booklet{Cooley_abs:the, title = {ABS: The Apportioned Backup System}, author = {Joe Cooley and Chris Taylor and Alen Peacock}, year = {2004}, abstract = {Many personal computers are operated with no backup strategy for protecting data in the event of loss or failure. At the same time, PCs are likely to contain spare disk space and unused networking resources. We present the Apportioned Backup System (ABS), which provides a reliable collaborative backup resource by leveraging these independent, distributed resources. With ABS, procuring and maintaining specialized backup hardware is unnecessary. ABS makes efficient use of network and storage resources through use of coding techniques, convergent encryption and storage, and efficient versioning and verification processes. The system also painlessly accommodates dynamic expansion of system compute, storage, and network resources, and is tolerant of catastrophic node failures}, www_section = {apportioned backup system}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.6858}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.120.6858.pdf}, }
@conference{Coulom:2006:ESB:1777826.1777833, title = {Efficient selectivity and backup operators in Monte-Carlo tree search}, author = {Coulom, R{\'e}mi}, booktitle = {CG'06--Proceedings of the 5th international conference on Computers and games}, organization = {Springer-Verlag}, year = {2007}, address = {Turin, Italy}, pages = {72--83}, publisher = {Springer-Verlag}, series = {CG'06}, abstract = {A Monte-Carlo evaluation consists in estimating a position by averaging the outcome of several random continuations. The method can serve as an evaluation function at the leaves of a min-max tree. This paper presents a new framework to combine tree search with Monte-Carlo evaluation, that does not separate between a min-max phase and a Monte-Carlo phase. Instead of backing-up the min-max value close to the root, and the average value at some depth, a more general backup operator is defined that progressively changes from averaging to minmax as the number of simulations grows. This approach provides a finegrained control of the tree growth, at the level of individual simulations, and allows efficient selectivity. The resulting algorithm was implemented in a 9 {\texttimes} 9 Go-playing program, Crazy Stone, that won the 10th KGS computer-Go tournament}, www_section = {framework, MCTS, Monte-Carlo Tree Search}, isbn = {3-540-75537-3, 978-3-540-75537-1}, url = {http://dl.acm.org/citation.cfm?id=1777826.1777833}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CG\%2706\%20-\%20Selectivity\%20and\%20backup\%20operators\%20in\%20MCTS.pdf}, }
@booklet{Cox02pastiche:making, title = {Pastiche: Making Backup Cheap and Easy}, author = {Landon P. Cox and Christopher D. Murray and Brian D. Noble}, year = {2002}, abstract = {Backup is cumbersome and expensive. Individual users almost never back up their data, and backup is a significant cost in large organizations. This paper presents Pastiche, a simple and inexpensive backup system. Pastiche exploits excess disk capacity to perform peer-to-peer backup with no administrative costs. Each node minimizes storage overhead by selecting peers that share a significant amount of data. It is easy for common installations to find suitable peers, and peers with high overlap can be identified with only hundreds of bytes. Pastiche provides mechanisms for confidentiality, integrity, and detection of failed or malicious peers. A Pastiche prototype suffers only 7.4\% overhead for a modified Andrew Benchmark, and restore performance is comparable to cross-machine copy}, www_section = {backup, P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.15.3254}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.15.3254.pdf}, }
@article{Cox:2004:PDN:972374.972394, title = {Practical, distributed network coordinates}, author = {Russ Cox and Dabek, Frank and Frans M. Kaashoek and Li, Jinyang and Robert Morris}, journal = {SIGCOMM Computer Communication Review}, volume = {34}, year = {2004}, month = {January}, address = {New York, NY, USA}, pages = {113--118}, publisher = {ACM}, abstract = {Vivaldi is a distributed algorithm that assigns synthetic coordinates to internet hosts, so that the Euclidean distance between two hosts' coordinates predicts the network latency between them. Each node in Vivaldi computes its coordinates by simulating its position in a network of physical springs. Vivaldi is both distributed and efficient: no fixed infrastructure need be deployed and a new host can compute useful coordinates after collecting latency information from only a few other hosts. Vivaldi can rely on piggy-backing latency information on application traffic instead of generating extra traffic by sending its own probe packets.This paper evaluates Vivaldi through simulations of 750 hosts, with a matrix of inter-host latencies derived from measurements between 750 real Internet hosts. Vivaldi finds synthetic coordinates that predict the measured latencies with a median relative error of 14 percent. The simulations show that a new host joining an existing Vivaldi system requires fewer than 10 probes to achieve this accuracy. Vivaldi is currently used by the Chord distributed hash table to perform proximity routing, replica selection, and retransmission timer estimation}, www_section = {network coordinates, proximity routing, replica selection, retransmission timer estimation, Vivaldi}, issn = {0146-4833}, doi = {http://doi.acm.org/10.1145/972374.972394}, url = {http://doi.acm.org/10.1145/972374.972394}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Practical\%2C\%20distributed\%20network\%20coordinates.pdf}, }
@conference{Cramer04Bootstrapping, title = {Bootstrapping Locality-Aware P2P Networks}, author = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann}, booktitle = {Proceedings of the IEEE International Conference on Networks (ICON 2004)}, volume = {1}, year = {2004}, address = {Singapore}, pages = {357--361}, type = {publication}, abstract = {Bootstrapping is a vital core functionality required by every peer-to-peer (P2P) overlay network. Nodes intending to participate in such an overlay network initially have to find at least one node that is already part of this network. While structured P2P networks (e.g. distributed hash tables, DHTs) define rules about how to proceed after this point, unstructured P2P networks continue using bootstrapping techniques until they are sufficiently connected. In this paper, we compare solutions applicable to the bootstrapping problem. Measurements of an existing system, the Gnutella web caches, highlight the inefficiency of this particular approach. Improved bootstrapping mechanisms could also incorporate locality-awareness into the process. We propose an advanced mechanism by which the overlay topology is--to some extent--matched with the underlying topology. Thereby, the performance of the overall system can be vastly improved}, www_section = {bootstrapping, distributed hash table, P2P}, isbn = {0-7803-8783-X}, doi = {10.1109/ICON.2004.1409169}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04bootstrapping.pdf}, }
@conference{Cramer04DemandDrivenClustering, title = {Demand-Driven Clustering in MANETs}, author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart}, booktitle = {Proceedings of the 2004 International Conference on Wireless Networks (ICWN '04)}, volume = {1}, year = {2004}, address = {Las Vegas, NV}, pages = {81--87}, type = {publication}, abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been proposed in the literature. With only one exception so far [1], all these protocols are proactive, thus wasting bandwidth when their function is not currently needed. To reduce the signalling traffic load, reactive clustering may be employed. We have developed a clustering protocol named {\textquotedblleft}On-Demand Group Mobility-Based Clustering {\textquotedblright} (ODGMBC) which is reactive. Its goal is to build clusters as a basis for address autoconfiguration and hierarchical routing. The design process especially addresses the notion of group mobility in a MANET. As a result, ODGMBC maps varying physical node groups onto logical clusters. In this paper, ODGMBC is described. It was implemented for the ad hoc network simulator GloMoSim [2] and evaluated using several performance indicators. Simulation results are promising and show that ODGMBC leads to stable clusters. This stability is advantageous for autoconfiguration and routing mechansims to be employed in conjunction with the clustering algorithm. Index Terms {\textemdash} clustering, multi-hop, reactive, MANET, group mobility}, www_section = {mobile Ad-hoc networks, multi-hop networks}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04odgmbc.pdf}, }
@conference{Cramer04LifeScience, title = {Peer-to-Peer Overlays and Data Integration in a Life Science Grid}, author = {Cramer, Curt and Andrea Schafferhans and Thomas Fuhrmann}, booktitle = {Proceedings of the First International Workshop of the EU Network of Excellence DELOS on Digital Library Architectures}, year = {2004}, address = {Cagliari, Italy}, pages = {127--138}, type = {publication}, abstract = {Databases and Grid computing are a good match. With the service orientation of Grid computing, the complexity of maintaining and integrating databases can be kept away from the actual users. Data access and integration is performed via services, which also allow to employ an access control. While it is our perception that many proposed Grid applications rely on a centralized and static infrastructure, Peer-to-Peer (P2P) technologies might help to dynamically scale and enhance Grid applications. The focus does not lie on publicly available P2P networks here, but on the self-organizing capabilities of P2P networks in general. A P2P overlay could, e.g., be used to improve the distribution of queries in a data Grid. For studying the combination of these three technologies, Grid computing, databases, and P2P, in this paper, we use an existing application from the life sciences, drug target validation, as an example. In its current form, this system has several drawbacks. We believe that they can be alleviated by using a combination of the service-based architecture of Grid computing and P2P technologies for implementing the services. The work presented in this paper is in progress. We mainly focus on the description of the current system state, its problems and the proposed new architecture. 
For a better understanding, we also outline the main topics related to the work presented here}, www_section = {GRID, overlay networks, P2P}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04lifescience.pdf}, }
@conference{Cramer04Scheduling, title = {Distributed Job Scheduling in a Peer-to-Peer Video Recording System}, author = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann}, booktitle = {Proceedings of the Workshop on Algorithms and Protocols for Efficient Peer-to-Peer Applications (PEPPA) at Informatik 2004}, year = {2004}, address = {Ulm, Germany}, pages = {234--238}, type = {publication}, abstract = {Since the advent of Gnutella, Peer-to-Peer (P2P) protocols have matured towards a fundamental design element for large-scale, self-organising distributed systems. Many research efforts have been invested to improve various aspects of P2P systems, like their performance, scalability, and so on. However, little experience has been gathered from the actual deployment of such P2P systems apart from the typical file sharing applications. To bridge this gap and to gain more experience in making the transition from theory to practice, we started building advanced P2P applications whose explicit goal is {\textquotedblleft}to be deployed in the wild{\textquotedblright}. In this paper, we describe a fully decentralised P2P video recording system. Every node in the system is a networked computer (desktop PC or set-top box) capable of receiving and recording DVB-S, i.e. digital satellite TV. Like a normal video recorder, users can program their machines to record certain programmes. With our system, they will be able to schedule multiple recordings in parallel. It is the task of the system to assign the recordings to different machines in the network. Moreover, users can {\textquotedblleft}record broadcasts in the past{\textquotedblright}, i.e. the system serves as a short-term archival storage}, www_section = {DVB, P2P}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04scheduling.pdf}, }
@conference{Cuenca-Acuna03planetp:using, title = {PlanetP: Using Gossiping to Build Content Addressable Peer-to-Peer Information Sharing Communities}, author = {Francisco Matias Cuenca-Acuna and Christopher Peery and Richard P. Martin and Thu D. Nguyen}, booktitle = {12th IEEE International Symposium on High Performance Distributed Computing (HPDC-12 '03),}, organization = {IEEE Press}, year = {2003}, address = {Seattle, Washington}, publisher = {IEEE Press}, abstract = {PlanetP is a peer-to-peer system in which searching content is done mostly locally. Every peer knows which content is available at which other peers. The index information is represented compactly using bloom filters and distributed throughout the network using push and pull mechanisms}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.6056\&rep=rep1\&type=url\&i=0}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.dvi_.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@techreport{DASEIN, title = {Decentralized Authentication for Self-Sovereign Identities using Name Systems}, author = {Christian Grothoff and Martin Schanzenbach and Annett Laube and Emmanuel Benoist}, institution = {Berner Fachhochschule}, number = {847382}, year = {2018}, month = {October}, address = {Bern}, type = {H2020 submission}, abstract = {The GNU Name System (GNS) is a fully decentralized public key infrastructure and name system with private information retrieval semantics. It serves a holistic approach to interact seamlessly with IoT ecosystems and enables people and their smart objects to prove their identity, membership and privileges - compatible with existing technologies. In this report we demonstrate how a wide range of private authentication and identity management scenarios are addressed by GNS in a cost-efficient, usable and secure manner. This simple, secure and privacy-friendly authentication method is a significant breakthrough when cyber peace, privacy and liability are the priorities for the benefit of a wide range of the population. After an introduction to GNS itself, we show how GNS can be used to authenticate servers, replacing the Domain Name System (DNS) and X.509 certificate authorities (CAs) with a more privacy-friendly but equally usable protocol which is trustworthy, human-centric and includes group authentication. We also built a demonstrator to highlight how GNS can be used in medical computing to simplify privacy-sensitive data processing in the Swiss health-care system. Combining GNS with attribute-based encryption, we created ReclaimID, a robust and reliable OpenID Connect-compatible authorization system. It includes simple, secure and privacy-friendly single sign-on to seamlessly share selected attributes with Web services, cloud ecosystems. Further, we demonstrate how ReclaimID can be used to solve the problem of addressing, authentication and data sharing for IoT devices. 
These applications are just the beginning for GNS; the versatility and extensibility of the protocol will lend itself to an even broader range of use-cases. GNS is an open standard with a complete free software reference implementation created by the GNU project. It can therefore be easily audited, adapted, enhanced, tailored, developed and/or integrated, as anyone is allowed to use the core protocols and implementations free of charge, and to adopt them to their needs under the terms of the GNU Affero General Public License, a free software license approved by the Free Software Foundation.}, keywords = {DNS, GNU Name System, GNUnet, privacy, ReclaimID}, www_section = {DNS, GNU Name System, GNUnet, privacy, ReclaimID}, www_tags = {selected}, url = {https://bibliography.gnunet.org}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dasein10.pdf}, }
@conference{DBLP:conf/ccs/EdmanS09, title = {AS-awareness in Tor path selection}, author = {Matthew Edman and Paul Syverson}, booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, organization = {ACM}, year = {2009}, pages = {380--389}, editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, publisher = {ACM}, abstract = {Tor is an anonymous communications network with thousands of router nodes worldwide. An intuition reflected in much of the literature on anonymous communications is that, as an anonymity network grows, it becomes more secure against a given observer because the observer will see less of the network. In particular, as the Tor network grows from volunteers operating relays all over the world, it becomes less and less likely for a single autonomous system (AS) to be able to observe both ends of an anonymous connection. Yet, as the network continues to grow significantly, no analysis has been done to determine if this intuition is correct. Further, modifications to Tor's path selection algorithm to help clients avoid an AS-level observer have not been proposed and analyzed. Five years ago a previous study examined the AS-level threat against client and destination addresses chosen a priori to be likely or interesting to examine. Using an AS-level path inference algorithm with improved accuracy, more extensive Internet routing data, and, most importantly, a model of typical Tor client AS-level sources and destinations based on data gathered from the live network, we demonstrate that the threat of a single AS observing both ends of an anonymous Tor connection is greater than previously thought. We look at the growth of the Tor network over the past five years and show that its explosive growth has had only a small impact on the network's robustness against an AS-level attacker. 
Finally, we propose and evaluate the effectiveness of some simple, AS-aware path selection algorithms that avoid the computational overhead imposed by full AS-level path inference algorithms. Our results indicate that a novel heuristic we propose is more effective against an AS-level observer than other commonly proposed heuristics for improving location diversity in path selection}, www_section = {anonymity, autonomous systems, privacy, Tor}, isbn = {978-1-60558-894-0}, doi = {10.1145/1653662.1653708}, url = {http://portal.acm.org/citation.cfm?id=1653662.1653708}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanS09.pdf}, }
@conference{DBLP:conf/ccs/TroncosoD09, title = {The bayesian traffic analysis of mix networks}, author = {Carmela Troncoso and George Danezis}, booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, organization = {ACM}, year = {2009}, pages = {369--379}, editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, publisher = {ACM}, abstract = {This work casts the traffic analysis of anonymity systems, and in particular mix networks, in the context of Bayesian inference. A generative probabilistic model of mix network architectures is presented, that incorporates a number of attack techniques in the traffic analysis literature. We use the model to build an Markov Chain Monte Carlo inference engine, that calculates the probabilities of who is talking to whom given an observation of network traces. We provide a thorough evaluation of its correctness and performance, and confirm that mix networks with realistic parameters are secure. This approach enables us to apply established information theoretic anonymity metrics on complex mix networks, and extract information from anonymised traffic traces optimally}, www_section = {anonymity, Markov chain, traffic analysis}, isbn = {978-1-60558-894-0}, doi = {10.1145/1653662.1653707}, url = {http://portal.acm.org/citation.cfm?id=1653662.1653707}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TroncosoD09.pdf}, }
@conference{DBLP:conf/ccs/VassermanJTHK09, title = {Membership-concealing overlay networks}, author = {Eugene Y. Vasserman and Rob Jansen and James Tyra and Nicholas J. Hopper and Yongdae Kim}, booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, organization = {ACM}, year = {2009}, pages = {390--399}, editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, publisher = {ACM}, www_section = {membership concealment, P2P, privacy}, isbn = {978-1-60558-894-0}, doi = {10.1145/1653662.1653709}, url = {http://portal.acm.org/citation.cfm?id=1653662.1653709}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/VassermanJTHK09.pdf}, }
@conference{DBLP:conf/ccs/YangG03, title = {PPay: micropayments for peer-to-peer systems}, author = {Beverly Yang and Hector Garcia-Molina}, booktitle = {CCS'03. Proceedings of the 10th ACM Conference on Computer and Communications Security}, organization = {ACM}, year = {2003}, month = {October}, address = {Washington, DC, USA}, pages = {300--310}, publisher = {ACM}, www_section = {economics, payment}, isbn = {1-58113-738-9}, doi = {10.1145/948109.948150}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2703\%20-\%20Yang\%20\%26\%20Garcia-Molina\%20-\%20PPay.pdf}, }
@conference{DBLP:conf/dbsec/Kerschbaum11, title = {Public-Key Encrypted Bloom Filters with Applications to Supply Chain Integrity}, author = {Florian Kerschbaum}, booktitle = {Proceedings of the 25th Annual IFIP WG 11.3 Conference on Data and Applications Security and Privacy (DBSec 2011)}, year = {2011}, pages = {60--75}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/diau/PfitzmannK00, title = {Anonymity, Unobservability, and Pseudonymity--A Proposal for Terminology}, author = {Andreas Pfitzmann and Marit K{\"o}hntopp}, booktitle = {Workshop on Design Issues in Anonymity and Unobservability}, year = {2000}, pages = {1--9}, url = {https://bibliography.gnunet.org}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{DBLP:conf/esorics/DanezisDKT09, title = {The Wisdom of Crowds: Attacks and Optimal Constructions}, author = {George Danezis and Claudia Diaz and Emilia K{\"a}sper and Carmela Troncoso}, booktitle = {Proceedings of the 14th European Symposium on Research in Computer Security (ESORICS 2009), Saint-Malo, France, September 21-23}, organization = {Springer}, volume = {5789}, year = {2009}, pages = {406--423}, editor = {Michael Backes and Peng Ning}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {We present a traffic analysis of the ADU anonymity scheme presented at ESORICS 2008, and the related RADU scheme. We show that optimal attacks are able to de-anonymize messages more effectively than believed before. Our analysis applies to single messages as well as long term observations using multiple messages. The search of a {\textquotedblleft}better{\textquotedblright} scheme is bound to fail, since we prove that the original Crowds anonymity system provides the best security for any given mean messaging latency. Finally we present D-Crowds, a scheme that supports any path length distribution, while leaking the least possible information, and quantify the optimal attacks against it}, www_section = {anonymity, Crowds, traffic analysis}, isbn = {978-3-642-04443-4}, doi = {10.1007/978-3-642-04444-1}, url = {http://www.springerlink.com/content/t6q86u137t4762k8/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisDKT09.pdf}, }
@conference{DBLP:conf/esorics/MalleshW07, title = {Countering Statistical Disclosure with Receiver-Bound Cover Traffic}, author = {Nayantara Mallesh and Matthew Wright}, booktitle = {Proceedings of ESORICS 2007, 12th European Symposium On Research In Computer Security, Dresden, Germany, September 24-26, 2007, Proceedings}, organization = {Springer}, volume = {4734}, year = {2007}, pages = {547--562}, editor = {Joachim Biskup and Javier Lopez}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {Anonymous communications provides an important privacy service by keeping passive eavesdroppers from linking communicating parties. However, using long-term statistical analysis of traffic sent to and from such a system, it is possible to link senders with their receivers. Cover traffic is an effective, but somewhat limited, counter strategy against this attack. Earlier work in this area proposes that privacy-sensitive users generate and send cover traffic to the system. However, users are not online all the time and cannot be expected to send consistent levels of cover traffic, drastically reducing the impact of cover traffic. We propose that the mix generate cover traffic that mimics the sending patterns of users in the system. This receiver-bound cover helps to make up for users that aren't there, confusing the attacker. We show through simulation how this makes it difficult for an attacker to discern cover from real traffic and perform attacks based on statistical analysis. Our results show that receiver-bound cover substantially increases the time required for these attacks to succeed. 
When our approach is used in combination with user-generated cover traffic, the attack takes a very long time to succeed}, www_section = {anonymity, cover traffic, privacy}, isbn = {978-3-540-74834-2}, doi = {10.1007/978-3-540-74835-9}, url = {http://www.springerlink.com/content/k2146538700m71v7/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MalleshW07.pdf}, }
@conference{DBLP:conf/eurocrypt/ChaumP92, title = {Transferred Cash Grows in Size}, author = {David Chaum and Torben P. Pedersen}, booktitle = {EUROCRYPT'92 Workshop on the Theory and Application of Cryptographic Techniques}, organization = {Springer}, volume = {658}, year = {1992}, month = {May}, address = {Balatonf{\"u}red, Hungary}, pages = {390--407}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {All known methods for transferring electronic money have the disadvantages that the number of bits needed to represent the money after each payment increases, and that a payer can recognize his money if he sees it later in the chain of payments (forward traceability). This paper shows that it is impossible to construct an electronic money system providing transferability without the property that the money grows when transferred. Furthermore it is argued that an unlimited powerful user can always recognize his money later. Finally, the lower bounds on the size of transferred electronic money are discussed in terms of secret sharing schemes}, www_section = {electronic money, forward traceability, secret sharing, transfer}, isbn = {3-540-56413-6}, doi = {10.1007/3-540-47555-9_32}, url = {https://doi.org/10.1007/3-540-47555-9_32}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2792_-_Chaun_\%26_Pedersen_-_Transferred_cash_grows_in_size.pdf}, }
@conference{DBLP:conf/eurocrypt/RussellW02, title = {How to Fool an Unbounded Adversary with a Short Key}, author = {Alexander Russell and Hong Wang}, booktitle = {EUROCRYPT'02--Proceedings of the International Conference on the Theory and Applications of Cryptographic Techniques}, organization = {Springer}, volume = {2332}, year = {2002}, pages = {133--148}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, www_section = {Unsorted}, }
@conference{DBLP:conf/focs/DworkNV12, title = {The Privacy of the Analyst and the Power of the State}, author = {Cynthia Dwork and Moni Naor and Salil P. Vadhan}, booktitle = {FOCS'12--Proceedings of the 53rd Annual IEEE Symposium on Foundations of Computer Science}, year = {2012}, pages = {400--409}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/icc/ChenCLNC08, title = {Experimental Analysis of Super-Seeding in BitTorrent}, author = {Zhijia Chen and Yang Chen and Chuang Lin and Vaibhav Nivargi and Pei Cao}, booktitle = {ICC'08--Proceedings of the 2008 IEEE International Conference on Communications}, organization = {IEEE Computer Society}, year = {2008}, month = {May}, address = {Beijing, China}, pages = {65--69}, publisher = {IEEE Computer Society}, abstract = {With the popularity of BitTorrent, improving its performance has been an active research area. Super-seeding, a special upload policy for initial seeds, improves the efficiency in producing multiple seeds and reduces the uploading cost of the initial seeders. However, the overall benefit of super seeding remains a question. In this paper, we conduct an experimental study over the performance of super-seeding scheme of BitTornado. We attempt to answer the following questions: whether and how much super-seeding saves uploading cost, whether the download time of all peers is decreased by super-seeding, and in which scenario super-seeding performs worse. With varying seed bandwidth and peer behavior, we analyze the overall download time and upload cost of super seeding scheme during random period tests over 250 widely distributed PlanetLab nodes. The results show that benefits of super-seeding depend highly on the upload bandwidth of the initial seeds and the behavior of individual peers. Our work not only provides reference for the potential adoption of super-seeding in BitTorrent, but also much insights for the balance of enhancing Quality of Experience (QoE) and saving cost for a large-scale BitTorrent-like P2P commercial application}, www_section = {BitTorrent, super-seeding}, isbn = {978-1-4244-2075-9}, doi = {http://dx.doi.org/10.1109/ICC.2008.20}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICC\%2708\%20-\%20Super-Seeding\%20in\%20BitTorrent.PDF}, }
@conference{DBLP:conf/infocom/ChandraBB04, title = {MultiNet: Connecting to Multiple IEEE 802.11 Networks Using a Single Wireless Card}, author = {Ranveer Chandra and Victor Bahl and Pradeep Bahl}, booktitle = {INFOCOM}, year = {2004}, abstract = {There are a number of scenarios where it is desirable to have a wireless device connect to multiple networks simultaneously. Currently, this is possible only by using multiple wireless network cards in the device. Unfortunately, using multiple wireless cards causes excessive energy drain and consequent reduction of lifetime in battery operated devices. In this paper, we propose a software based approach, called MultiNet, that facilitates simultaneous connections to multiple networks by virtualizing a single wireless card. The wireless card is virtualized by introducing an intermediate layer below IP, which continuously switches the card across multiple networks. The goal of the switching algorithm is to be transparent to the user who sees her machine as being connected to multiple networks. We present the design, implementation, and performance of the MultiNet system.We analyze and evaluate buffering and switching algorithms in terms of delay and energy consumption. Our system has been operational for over twelve months, it is agnostic of the upper layer protocols, and works well over popular IEEE 802.11 wireless LAN cards}, url = {http://www.pubzone.org/dblp/conf/infocom/ChandraBB04}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/18_3.PDF}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{DBLP:conf/infocom/LandaGCMR09, title = {A Sybilproof Indirect Reciprocity Mechanism for Peer-to-Peer Networks}, author = {Raul Leonardo Landa Gamiochipi and David Griffin and Richard G. Clegg and Eleni Mykoniati and Miguel Rio}, booktitle = {INFOCOM 2009. The 28th IEEE International Conference on Computer Communications}, organization = {IEEE Computer Society}, year = {2009}, month = {April}, address = {Rio de Janeiro, Brazil}, pages = {343--351}, publisher = {IEEE Computer Society}, abstract = {Although direct reciprocity (Tit-for-Tat) contribution systems have been successful in reducing free-loading in peer-to-peer overlays, it has been shown that, unless the contribution network is dense, they tend to be slow (or may even fail) to converge [1]. On the other hand, current indirect reciprocity mechanisms based on reputation systems tend to be susceptible to sybil attacks, peer slander and whitewashing.In this paper we present PledgeRoute, an accounting mechanism for peer contributions that is based on social capital. This mechanism allows peers to contribute resources to one set of peers and use this contribution to obtain services from a different set of peers, at a different time. PledgeRoute is completely decentralised, can be implemented in both structured and unstructured peer-to-peer systems, and it is resistant to the three kinds of attacks mentioned above.To achieve this, we model contribution transitivity as a routing problem in the contribution network of the peer-to-peer overlay, and we present arguments for the routing behaviour and the sybilproofness of our contribution transfer procedures on this basis. 
Additionally, we present mechanisms for the seeding of the contribution network, and a combination of incentive mechanisms and reciprocation policies that motivate peers to adhere to the protocol and maximise their service contributions to the overlay}, www_section = {p2p network, reciprocity mechanism, sybilproof}, isbn = {978-1-4244-3512-8}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Sybilproof\%20Indirect\%20Reprocity\%20Mechanism\%20for\%20P2P\%20Networks\%20.pdf}, }
@conference{DBLP:conf/infocom/ZhangCY07, title = {MARCH: A Distributed Incentive Scheme for Peer-to-Peer Networks}, author = {Zhan Zhang and Shigang Chen and MyungKeun Yoon}, booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications}, organization = {IEEE Computer Society}, year = {2007}, month = {May}, address = {Anchorage, Alaska, USA}, pages = {1091--1099}, publisher = {IEEE Computer Society}, abstract = {As peer-to-peer networks grow larger and include more diverse users, the lack of incentive to encourage cooperative behavior becomes one of the key problems. This challenge cannot be fully met by traditional incentive schemes, which suffer from various attacks based on false reports. Especially, due to the lack of central authorities in typical P2P systems, it is difficult to detect colluding groups. Members in the same colluding group can cooperate to manipulate their history information, and the damaging power increases dramatically with the group size. In this paper, we propose a new distributed incentive scheme, in which the benefit that a node can obtain from the system is proportional to its contribution to the system, and a colluding group cannot gain advantage by cooperation regardless of its size. Consequently, the damaging power of colluding groups is strictly limited. The proposed scheme includes three major components: a distributed authority infrastructure, a key sharing protocol, and a contract verification protocol}, www_section = {march}, isbn = {1-4244-1047-9}, doi = {http://dx.doi.org/10.1109/INFCOM.2007.131}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20MARCH.pdf}, }
@conference{DBLP:conf/iptps/DabekZDKS03, title = {Towards a Common API for Structured Peer-to-Peer Overlays}, author = {Dabek, Frank and Ben Y. Zhao and Peter Druschel and John Kubiatowicz and Ion Stoica}, booktitle = {IPTPS'03. Proceedings of the Second International Workshop on Peer-to-Peer Systems}, organization = {Springer}, volume = {2735}, year = {2003}, month = {February}, address = {Berkeley, CA, USA}, pages = {33--44}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {In this paper, we describe an ongoing effort to define common APIs for structured peer-to-peer overlays and the key abstractions that can be built on them. In doing so, we hope to facilitate independent innovation in overlay protocols, services, and applications, to allow direct experimental comparisons, and to encourage application development by third parties. We provide a snapshot of our efforts and discuss open problems in an effort to solicit feedback from the research community}, www_section = {API, key abstraction}, isbn = {3-540-40724-3}, doi = {10.1007/978-3-540-45172-3_3}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2703\%20-\%20Towards\%20a\%20common\%20API.pdf}, }
@conference{DBLP:conf/ladc/CourtesKP07, title = {Security Rationale for a Cooperative Backup Service for Mobile Devices}, author = {Ludovic Court{\`e}s and Killijian, Marc-Olivier and Powell, David}, booktitle = {LADC}, year = {2007}, pages = {212--230}, abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on but are used in contexts that put them at risk of physical damage, loss or theft. This paper discusses security considerations that arise in the design of a cooperative backup service for mobile devices. Participating devices leverage encounters with other devices to temporarily replicate critical data. Anyone is free to participate in the cooperative service, without requiring any prior trust relationship with other participants. In this paper, we identify security threats relevant in this context as well as possible solutions and discuss how they map to low-level security requirements related to identity and trust establishment. We propose self-organized, policy-neutral mechanisms that allow the secure designation and identification of participating devices. We show that they can serve as a building block for a wide range of cooperation policies that address most of the security threats we are concerned with. We conclude on future directions}, www_section = {backup, reputation, self-organization}, doi = {10.1007/978-3-540-75294-3}, url = {http://www.springerlink.com/content/p210q274g22j8g77/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.5673.pdf}, }
@conference{DBLP:conf/middleware/BertierFGKL10, title = {The Gossple Anonymous Social Network}, author = {Marin Bertier and Davide Frey and Rachid Guerraoui and Anne-Marie Kermarrec and Vincent Leroy}, booktitle = {Proceedings of the ACM/IFIP/USENIX 11th International Conference on Middleware}, organization = {ACM/IFIP/USENIX}, year = {2010}, pages = {191--211}, publisher = {ACM/IFIP/USENIX}, abstract = {While social networks provide news from old buddies, you can learn a lot more from people you do not know, but with whom you share many interests. We show in this paper how to build a network of anonymous social acquaintances using a gossip protocol we call Gossple, and how to leverage such a network to enhance navigation within Web 2.0 collaborative applications, {\`a} la LastFM and Delicious. Gossple nodes (users) periodically gossip digests of their interest profiles and compute their distances (in terms of interest) with respect to other nodes. This is achieved with little bandwidth and storage, fast convergence, and without revealing which profile is associated with which user. We evaluate Gossple on real traces from various Web 2.0 applications with hundreds of PlanetLab hosts and thousands of simulated nodes}, www_section = {gossple, social networks}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossple2010Bertier.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/netys/BoutetFJKR13, title = {FreeRec: An Anonymous and Distributed Personalization Architecture}, author = {Antoine Boutet and Davide Frey and Arnaud Jegou and Anne-Marie Kermarrec and Heverson B. Ribeiro}, booktitle = {NETYS'13--Proceedings of the First International Conference on Networked Systems}, year = {2013}, pages = {58--73}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/p2p/AmannEHF08, title = {IgorFs: A Distributed P2P File System}, author = {Bernhard Amann and Benedikt Elser and Yaser Houri and Thomas Fuhrmann}, booktitle = {Peer-to-Peer Computing}, year = {2008}, pages = {77--78}, abstract = {IgorFs is a distributed, decentralized peer-to-peer (P2P) file system that is completely transparent to the user. It is built on top of the Igor peer-to-peer overlay network, which is similar to Chord, but provides additional features like service orientation or proximity neighbor and route selection. IgorFs offers an efficient means to publish data files that are subject to frequent but minor modifications. In our demonstration we show two use cases for IgorFs: the first example is (static) software-distribution and the second example is (dynamic) file distribution}, www_section = {distributed storage, P2P}, doi = {10.1109/P2P.2008.19}, url = {http://www.pubzone.org/dblp/conf/p2p/AmannEHF08}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/amann08igorfs.pdf}, }
@incollection{DBLP:conf/p2p/EberspacherS05a, title = {First and Second Generation of Peer-to-Peer Systems}, author = {J{\"o}rg Ebersp{\"a}cher and R{\"u}diger Schollmeier}, booktitle = {Peer-to-Peer Systems and Applications}, organization = {Springer Berlin / Heidelberg}, volume = {3485}, year = {2005}, pages = {35--56}, publisher = {Springer Berlin / Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {Peer-to-Peer (P2P) networks appeared roughly around the year 2000 when a broadband Internet infrastructure (even at the network edge) became widely available. Other than traditional networks Peer-to-Peer networks do not rely on a specific infrastructure offering transport services. Instead they form {\textquotedblleft}overlay structures{\textquotedblright} focusing on content allocation and distribution based on TCP or HTTP connections. Whereas in a standard Client-Server configuration content is stored and provided only via some central server(s), Peer-to-Peer networks are highly decentralized and locate a desired content at some participating peer and provide the corresponding IP address of that peer to the searching peer. The download of that content is then initiated using a separate connection, often using HTTP. Thus, the high load usually resulting for a central server and its surrounding network is avoided leading to a more even distribution of load on the underlying physical network. On the other hand, such networks are typically subject to frequent changes because peers join and leave the network without any central control}, www_section = {generation, P2P, peer-to-peer networking}, }
@conference{DBLP:conf/saint/SaitoMSSM07, title = {Local Production, Local Consumption: Peer-to-Peer Architecture for a Dependable and Sustainable Social Infrastructure}, author = {Saito, Kenji and Morino, Eiichi and Yoshihiko Suko and Takaaki Suzuki and Murai, Jun}, booktitle = {SAINT'07. Proceedings of the 2007 Symposium on Applications and the Internet}, organization = {IEEE Computer Society}, year = {2007}, month = {January}, address = {Hiroshima, Japan}, pages = {0--58}, publisher = {IEEE Computer Society}, abstract = {Peer-to-peer (P2P) is a system of overlay networks such that participants can potentially take symmetrical roles. This translates itself into a design based on the philosophy of Local Production, Local Consumption (LPLC), originally an agricultural concept to promote sustainable local economy. This philosophy helps enhancing survivability of a society by providing a dependable economic infrastructure and promoting the power of individuals. This paper attempts to put existing works of P2P designs into the perspective of the five-layer architecture model to realize LPLC, and proposes future research directions toward integration of P2P studies for actualization of a dependable and sustainable social infrastructure}, www_section = {LPLC, P2P, peer-to-peer networking}, doi = {http://doi.ieeecomputersociety.org/10.1109/SAINT-W.2007.59}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SAINT\%2707\%20-\%20Local\%20production\%2C\%20local\%20consumption\%20p2p\%20architecture.pdf}, }
@conference{DBLP:conf/sigcomm/JainDPF05, title = {Using redundancy to cope with failures in a delay tolerant network}, author = {Sushant Jain and Michael J. Demmer and Rabin K. Patra and Fall, Kevin}, booktitle = {SIGCOMM}, organization = {ACM New York, NY, USA}, year = {2005}, address = {Philadelphia, Pennsylvania, USA}, pages = {109--120}, publisher = {ACM New York, NY, USA}, abstract = {We consider the problem of routing in a delay tolerant network (DTN) in the presence of path failures. Previous work on DTN routing has focused on using precisely known network dynamics, which does not account for message losses due to link failures, buffer overruns, path selection errors, unscheduled delays, or other problems. We show how to split, replicate, and erasure code message fragments over multiple delivery paths to optimize the probability of successful message delivery. We provide a formulation of this problem and solve it for two cases: a 0/1 (Bernoulli) path delivery model where messages are either fully lost or delivered, and a Gaussian path delivery model where only a fraction of a message may be delivered. Ideas from the modern portfolio theory literature are borrowed to solve the underlying optimization problem. Our approach is directly relevant to solving similar problems that arise in replica placement in distributed file systems and virtual node placement in DHTs. In three different simulated DTN scenarios covering a wide range of applications, we show the effectiveness of our approach in handling failures}, www_section = {delay tolerant network, routing}, isbn = {1-59593-009-4}, doi = {10.1145/1080091.1080106}, url = {http://portal.acm.org/citation.cfm?doid=1080091.1080106}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-JaiDem.pdf}, }
@conference{DBLP:conf/sigecom/GhoshR11, title = {Selling Privacy at Auction}, author = {Arpita Ghosh and Aaron Roth}, booktitle = {EC'11--Proceedings of the 12th ACM Conference on Electronic Commerce}, year = {2011}, pages = {199--208}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@conference{DBLP:conf/sp/DanezisG09, title = {Sphinx: A Compact and Provably Secure Mix Format}, author = {George Danezis and Ian Goldberg}, booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P 2009), 17-20 May, Oakland, California, USA}, organization = {IEEE Computer Society}, year = {2009}, pages = {269--282}, publisher = {IEEE Computer Society}, abstract = {Sphinx is a cryptographic message format used to relay anonymized messages within a mix network. It is more compact than any comparable scheme, and supports a full set of security features: indistinguishable replies, hiding the path length and relay position, as well as providing unlinkability for each leg of the message's journey over the network. We prove the full cryptographic security of Sphinx in the random oracle model, and we describe how it can be used as an efficient drop-in replacement in deployed remailer systems}, www_section = {anonymity, cryptography}, isbn = {978-0-7695-3633-0}, doi = {10.1109/SP.2009.15}, url = {http://portal.acm.org/citation.cfm?id=1607723.1608138}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisG09.pdf}, }
@conference{DBLP:conf/sp/NarayananS09, title = {De-anonymizing Social Networks}, author = {Arvind Narayanan and Vitaly Shmatikov}, booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P 2009), 17-20 May, Oakland, California, USA}, organization = {IEEE Computer Society}, year = {2009}, pages = {173--187}, publisher = {IEEE Computer Society}, abstract = {Operators of online social networks are increasingly sharing potentially sensitive information about users and their relationships with advertisers, application developers, and data-mining researchers. Privacy is typically protected by anonymization, i.e., removing names, addresses, etc. We present a framework for analyzing privacy and anonymity in social networks and develop a new re-identification algorithm targeting anonymized social-network graphs. To demonstrate its effectiveness on real-world networks, we show that a third of the users who can be verified to have accounts on both Twitter, a popular microblogging service, and Flickr, an online photo-sharing site, can be re-identified in the anonymous Twitter graph with only a 12\% error rate. Our de-anonymization algorithm is based purely on the network topology, does not require creation of a large number of dummy "sybil" nodes, is robust to noise and all existing defenses, and works even when the overlap between the target network and the adversary's auxiliary information is small}, www_section = {anonymity, network topology, privacy}, isbn = {978-0-7695-3633-0}, url = {http://randomwalker.info/social-networks/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NarayananS09.pdf}, }
@conference{DBLP:conf/sss/Kermarrec09, title = {Challenges in Personalizing and Decentralizing the Web: An Overview of GOSSPLE}, author = {Anne-Marie Kermarrec}, booktitle = {SSS'09--Proceedings of the 11th International Symposium on Stabilization, Safety, and Security of Distributed Systems}, year = {2009}, pages = {1--16}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@conference{DBLP:conf/stoc/Ullman13, title = {Answering $n^{2+o(1)}$ Counting Queries with Differential Privacy is Hard}, author = {Jonathan Ullman}, booktitle = {STOC'13--Proceedings of the 45th Annual ACM Symposium on Theory of Computing}, year = {2013}, pages = {361--370}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/tridentcom/AlbrechtH10, title = {Managing Distributed Applications Using Gush}, author = {Jeannie R. Albrecht and Danny Yuxing Huang}, booktitle = {TRIDENTCOM}, year = {2010}, pages = {401--411}, www_section = {distributed applications, emulation, GENI, PlanetLab, testbed}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gush.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/tridentcom/HermenierR12, title = {How to Build a Better Testbed: Lessons from a Decade of Network Experiments on Emulab}, author = {Fabien Hermenier and Robert Ricci}, booktitle = {TRIDENTCOM}, year = {2012}, pages = {287--304}, www_section = {emulab, emulation, testbed}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/how-to-build-a-better-testbed.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/tridentcom/NguyenRKFMB10, title = {How to Build Complex, Large-Scale Emulated Networks}, author = {Hung X. Nguyen and Roughan, Matthew and Knight, Simon and Nick Falkner and Maennel, Olaf and Randy Bush}, booktitle = {TRIDENTCOM}, year = {2010}, pages = {3--18}, www_section = {autonetkit, emulation, netkit, network, testbed, virtualization}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AutoNetkit_0.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/tridentcom/PeralaPML10, title = {A Novel Testbed for P2P Networks}, author = {Pekka H. J. Per{\"a}l{\"a} and Jori P. Paananen and Milton Mukhopadhyay and Jukka-Pekka Laulajainen}, booktitle = {TRIDENTCOM}, year = {2010}, pages = {69--83}, www_section = {emulation, P2P, testbed}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/A_Novel_Testbed_for_P2P_Networks.pdf}, url = {https://bibliography.gnunet.org}, }
@conference{DBLP:conf/usenix/HiblerRSDGSWL08, title = {Large-scale Virtualization in the Emulab Network Testbed}, author = {Mike Hibler and Robert Ricci and Leigh Stoller and Jonathon Duerig and Shashi Guruprasad and Tim Stack and Kirk Webb and Jay Lepreau}, booktitle = {USENIX Annual Technical Conference}, year = {2008}, pages = {113--128}, www_section = {emulab, emulation, testbed, virtualization}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/emulab.pdf}, }
@article{DBLP:journals/corr/abs-1202-4503, title = {A Critical Look at Decentralized Personal Data Architectures}, author = {Arvind Narayanan and Vincent Toubiana and Solon Barocas and Helen Nissenbaum and Dan Boneh}, journal = {CoRR}, volume = {abs/1202.4503}, year = {2012}, month = {February}, abstract = {While the Internet was conceived as a decentralized network, the most widely used web applications today tend toward centralization. Control increasingly rests with centralized service providers who, as a consequence, have also amassed unprecedented amounts of data about the behaviors and personalities of individuals. Developers, regulators, and consumer advocates have looked to alternative decentralized architectures as the natural response to threats posed by these centralized services. The result has been a great variety of solutions that include personal data stores (PDS), infomediaries, Vendor Relationship Management (VRM) systems, and federated and distributed social networks. And yet, for all these efforts, decentralized personal data architectures have seen little adoption. This position paper attempts to account for these failures, challenging the accepted wisdom in the web community on the feasibility and desirability of these approaches. We start with a historical discussion of the development of various categories of decentralized personal data architectures. Then we survey the main ideas to illustrate the common themes among these efforts. We tease apart the design characteristics of these systems from the social values that they (are intended to) promote. We use this understanding to point out numerous drawbacks of the decentralization paradigm, some inherent and others incidental. 
We end with recommendations for designers of these systems for working towards goals that are achievable, but perhaps more limited in scope and ambition}, www_section = {distributed social networks, economics, personal data stores, policy, privacy, web}, url = {https://bibliography.gnunet.org}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoRR\%20-\%20Critical\%20look\%20at\%20decentralization.pdf}, }
@article{DBLP:journals/corr/abs-cs-0611016, title = {Increasing Data Resilience of Mobile Devices with a Collaborative Backup Service}, author = {Damien Martin-Guillerez and Michel Ban{\^a}tre and Paul Couderc}, journal = {CoRR}, volume = {abs/cs/0611016}, year = {2006}, abstract = {Whoever has had his cell phone stolen knows how frustrating it is to be unable to get his contact list back. To avoid data loss when losing or destroying a mobile device like a PDA or a cell phone, data is usually backed-up to a fixed station. However, in the time between the last backup and the failure, important data can have been produced and then lost. To handle this issue, we propose a transparent collaborative backup system. Indeed, by saving data on other mobile devices between two connections to a global infrastructure, we can resist to such scenarios. In this paper, after a general description of such a system, we present a way to replicate data on mobile devices to attain a prerequired resilience for the backup}, url = {http://www.pubzone.org/dblp/journals/corr/abs-cs-0611016}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0611016v1.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@article{DBLP:journals/ijufks/Sweene02, title = {k-Anonymity: A Model for Protecting Privacy}, author = {Latanya Sweeney}, journal = {International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems}, volume = {10}, number = {5}, year = {2002}, pages = {557--570}, www_section = {Unsorted}, }
@article{DBLP:journals/pvldb/Amer-YahiaBLS08, title = {Efficient network aware search in collaborative tagging sites}, author = {Sihem Amer-Yahia and Michael Benedikt and Laks V. S. Lakshmanan and Julia Stoyanovich}, journal = {PVLDB'08}, volume = {1}, number = {1}, year = {2008}, month = {August}, address = {Auckland, New Zealand}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@article{DBLP:journals/tdp/NojimaK09, title = {Cryptographically secure Bloom-filters}, author = {Ryo Nojima and Youki Kadobayashi}, journal = {Transactions on Data Privacy}, volume = {2}, number = {2}, year = {2009}, pages = {131--139}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@booklet{DD08Survey, title = {A Survey of Anonymous Communication Channels}, author = {George Danezis and Claudia Diaz}, number = {MSR-TR-2008-35}, year = {2008}, month = {January}, publisher = {Microsoft Research}, abstract = {We present an overview of the field of anonymous communications, from its establishment in 1981 from David Chaum to today. Key systems are presented categorized according to their underlying principles: semi-trusted relays, mix systems, remailers, onion routing, and systems to provide robust mixing. We include extended discussions of the threat models and usage models that different schemes provide, and the trade-offs between the security properties offered and the communication characteristics different systems support}, www_section = {onion routing, robustness}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.138.7951}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DD08Survey.pdf}, }
@conference{DK11, title = {Practical Privacy-Preserving Multiparty Linear Programming Based on Problem Transformation}, author = {Dreier, Jannik and Florian Kerschbaum}, booktitle = {PASSAT'11--Proceedings of the Third IEEE International Conference on Information Privacy, Security, Risk and Trust}, organization = {IEEE Computer Society}, year = {2011}, month = {October}, address = {Boston, Massachusetts, USA}, pages = {916--924}, publisher = {IEEE Computer Society}, abstract = {Cryptographic solutions to privacy-preserving multiparty linear programming are slow. This makes them unsuitable for many economically important applications, such as supply chain optimization, whose size exceeds their practically feasible input range. In this paper we present a privacy-preserving trans- formation that allows secure outsourcing of the linear program computation in an ef?cient manner. We evaluate security by quantifying the leakage about the input after the transformation and present implementation results. Using this transformation, we can mostly replace the costly cryptographic operations and securely solve problems several orders of magnitude larger}, www_section = {cryptography, SMC}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PASSAT\%2711\%20-\%20Multiparty\%20linear\%20programming.pdf}, url = {https://bibliography.gnunet.org}, }
@inproceedings{Dabek:2004:DDL:1251175.1251182,
  title = {Designing a DHT for Low Latency and High Throughput},
  author = {Dabek, Frank and Li, Jinyang and Sit, Emil and Robertson, James and Kaashoek, Frans M. and Morris, Robert},
  booktitle = {NSDI'04--Proceedings of the 1st conference on Symposium on Networked Systems Design and Implementation},
  organization = {USENIX Association},
  year = {2004},
  month = mar,
  address = {San Francisco, CA, USA},
  pages = {7},
  publisher = {USENIX Association},
  abstract = {Designing a wide-area distributed hash table (DHT) that provides high-throughput and low-latency network storage is a challenge. Existing systems have explored a range of solutions, including iterative routing, recursive routing, proximity routing and neighbor selection, erasure coding, replication, and server selection. This paper explores the design of these techniques and their interaction in a complete system, drawing on the measured performance of a new DHT implementation and results from a simulator with an accurate Internet latency model. New techniques that resulted from this exploration include use of latency predictions based on synthetic co-ordinates, efficient integration of lookup routing and data fetching, and a congestion control mechanism suitable for fetching data striped over large numbers of servers. Measurements with 425 server instances running on 150 PlanetLab and RON hosts show that the latency optimizations reduce the time required to locate and fetch data by a factor of two. The throughput optimizations result in a sustainable bulk read throughput related to the number of DHT hosts times the capacity of the slowest access link; with 150 selected PlanetLab hosts, the peak aggregate throughput over multiple clients is 12.8 megabytes per second},
  www_section = {distributed hash table, high-throughput, latency},
  url = {http://dl.acm.org/citation.cfm?id=1251175.1251182},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2704\%20-\%20Designing\%20a\%20DHT\%20for\%20low\%20latency\%20and\%20high\%20throughput.pdf},
}
@article{Dabek:2004:VDN:1030194.1015471,
  title = {Vivaldi: a decentralized network coordinate system},
  author = {Dabek, Frank and Cox, Russ and Kaashoek, Frans M. and Morris, Robert},
  journal = {SIGCOMM Computer Communication Review},
  volume = {34},
  year = {2004},
  month = oct,
  address = {New York, NY, USA},
  pages = {15--26},
  publisher = {ACM},
  abstract = {Large-scale Internet applications can benefit from an ability to predict round-trip times to other hosts without having to contact them first. Explicit measurements are often unattractive because the cost of measurement can outweigh the benefits of exploiting proximity information. Vivaldi is a simple, light-weight algorithm that assigns synthetic coordinates to hosts such that the distance between the coordinates of two hosts accurately predicts the communication latency between the hosts. Vivaldi is fully distributed, requiring no fixed network infrastructure and no distinguished hosts. It is also efficient: a new host can compute good coordinates for itself after collecting latency information from only a few other hosts. Because it requires little com-munication, Vivaldi can piggy-back on the communication patterns of the application using it and scale to a large number of hosts. An evaluation of Vivaldi using a simulated network whose latencies are based on measurements among 1740 Internet hosts shows that a 2-dimensional Euclidean model with height vectors embeds these hosts with low error (the median relative error in round-trip time prediction is 11 percent)},
  www_section = {internet topology, network coordinates, Vivaldi},
  issn = {0146-4833},
  doi = {10.1145/1030194.1015471},
  url = {http://doi.acm.org/10.1145/1030194.1015471},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Compt.\%20Commun.\%20Rev.\%20-\%20Vivaldi.pdf},
}
@inproceedings{Damiani02areputation-based,
  title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer Networks},
  author = {Damiani, Ernesto and De Capitani di Vimercati, Sabrina and Paraboschi, Stefano and Samarati, Pierangela and Violante, Fabio},
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  organization = {ACM Press},
  year = {2002},
  pages = {207--216},
  publisher = {ACM Press},
  abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently introduced P2P services have reached tens of millions of users. A feature that significantly contributes to the success of many P2P applications is user anonymity. However, anonymity opens the door to possible misuses and abuses, exploiting the P2P network as a way to spread tampered with resources, including Trojan Horses, viruses, and spam. To address this problem we propose a self-regulating system where the P2P network is used to implement a robust reputation mechanism. Reputation sharing is realized through a distributed polling algorithm by which resource requestors can assess the reliability of a resource offered by a participant before initiating the download. This way, spreading of malicious contents will be reduced and eventually blocked. Our approach can be straightforwardly piggybacked on existing P2P protocols and requires modest modifications to current implementations},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.1784\&rep=rep1\&type=pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Damiani02areputation-based_0,
  title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer Networks},
  author = {Damiani, Ernesto and De Capitani di Vimercati, Sabrina and Paraboschi, Stefano and Samarati, Pierangela and Violante, Fabio},
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  organization = {ACM Press},
  year = {2002},
  pages = {207--216},
  publisher = {ACM Press},
  abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently introduced P2P services have reached tens of millions of users. A feature that significantly contributes to the success of many P2P applications is user anonymity. However, anonymity opens the door to possible misuses and abuses, exploiting the P2P network as a way to spread tampered with resources, including Trojan Horses, viruses, and spam. To address this problem we propose a self-regulating system where the P2P network is used to implement a robust reputation mechanism. Reputation sharing is realized through a distributed polling algorithm by which resource requestors can assess the reliability of a resource offered by a participant before initiating the download. This way, spreading of malicious contents will be reduced and eventually blocked. Our approach can be straightforwardly piggybacked on existing P2P protocols and requires modest modifications to current implementations},
  url = {http://seclab.dti.unimi.it/Papers/ccs02.ps},
  internal-note = {near-duplicate of Damiani02areputation-based; differs only in url},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Dan:SFMix03,
  title = {Forward Secure Mixes},
  author = {George Danezis},
  booktitle = {Proceedings of 7th Nordic Workshop on Secure IT Systems},
  year = {2002},
  month = nov,
  address = {Karlstad, Sweden},
  pages = {195--207},
  editor = {Fisher-Hubner and Jonsson},
  abstract = {New threats such as compulsion to reveal logs, secret and private keys as well as to decrypt material are studied in the context of the security of mix networks. After a comparison of this new threat model with the traditional one, a new construction is introduced, the fs-mix, that minimizes the impact that such powers have on the security of the network, by using forward secure communication channels and key updating operation inside the mixes. A discussion about the forward security of these new proposals and some extensions is included},
  www_section = {anonymity, forward security, mix, traffic analysis},
  url = {http://citeseer.ist.psu.edu/533725.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dan-SFMix03.pdf},
}
@inproceedings{DanSer04,
  title = {Statistical Disclosure or Intersection Attacks on Anonymity Systems},
  author = {George Danezis and Andrei Serjantov},
  booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)},
  organization = {Springer Berlin / Heidelberg},
  year = {2004},
  month = may,
  address = {Toronto},
  publisher = {Springer Berlin / Heidelberg},
  series = {LNCS},
  abstract = {In this paper we look at the information an attacker can extract using a statistical disclosure attack. We provide analytical results about the anonymity of users when they repeatedly send messages through a threshold mix following the model of Kesdogan, Agrawal and Penz [7] and through a pool mix. We then present a statistical disclosure attack that can be used to attack models of anonymous communication networks based on pool mixes. Careful approximations make the attack computationally efficient. Such models are potentially better suited to derive results that could apply to the security of real anonymous communication networks},
  www_section = {anonymity, statistical analysis},
  isbn = {978-3-540-24207-9},
  doi = {10.1007/b104759},
  url = {http://www.springerlink.com/content/tqljb3hybk4rubla/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.2954.pdf},
}
@inproceedings{Danezis03mixminion:design,
  title = {Mixminion: Design of a Type III Anonymous Remailer Protocol},
  author = {George Danezis and Roger Dingledine and Nick Mathewson},
  booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy},
  year = {2003},
  pages = {2--15},
  abstract = {We present Mixminion, a message-based anonymous remailer protocol with secure single-use reply blocks. Mix nodes cannot distinguish Mixminion forward messages from reply messages, so forward and reply messages share the same anonymity set. We add directory servers that allow users to learn public keys and performance statistics of participating remailers, and we describe nymservers that provide long-term pseudonyms using single-use reply blocks as a primitive. Our design integrates link encryption between remailers to provide forward anonymity. Mixminion works in a real-world Internet environment, requires little synchronization or coordination between nodes, and protects against known anonymity-breaking attacks as well as or better than other systems with similar design parameters. 1. Overview Chaum first introduced anonymous remailers over 20 years ago [7]},
  url = {http://mixminion.net/minion-design.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/minion-design.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Danezis04theeconomics,
  title = {The Economics of Censorship Resistance},
  author = {George Danezis and Ross Anderson},
  booktitle = {The Third Annual Workshop on Economics and Information Security (WEIS04)},
  year = {2004},
  abstract = {We propose the first economic model of censorship resistance. Early peer-to-peer systems, such as the Eternity Service, sought to achieve censorship resistance by distributing content randomly over the whole Internet. An alternative approach is to encourage nodes to serve resources they are interested in. Both architectures have been implemented but so far there has been no quantitative analysis of the protection they provide. We develop a model inspired by economics and conflict theory to analyse these systems. Under our assumptions, resource distribution according to nodes' individual preferences provides better stability and resistance to censorship. Our results may have wider application too},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.7003\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.7003\%20\%281\%29.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Danezis05sybil-resistantdht,
  title = {Sybil-resistant DHT routing},
  author = {George Danezis and Chris Lesniewski-Laas and Frans M. Kaashoek and Ross Anderson},
  booktitle = {ESORICS},
  organization = {Springer},
  year = {2005},
  pages = {305--318},
  publisher = {Springer},
  abstract = {Distributed Hash Tables (DHTs) are very efficient distributed systems for routing, but at the same time vulnerable to disruptive nodes. Designers of such systems want them used in open networks, where an adversary can perform a sybil attack by introducing a large number of corrupt nodes in the network, considerably degrading its performance. We introduce a routing strategy that alleviates some of the effects of such an attack by making sure that lookups are performed using a diverse set of nodes. This ensures that at least some of the nodes queried are good, and hence the search makes forward progress. This strategy makes use of latent social information present in the introduction graph of the network},
  www_section = {distributed hash table, routing},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.3947},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sybildht.pdf},
}
@inproceedings{Delerablee:2007:IBE:1781454.1781471,
  title = {Identity-based broadcast encryption with constant size ciphertexts and private keys},
  author = {Delerabl{\'e}e, C{\'e}cile},
  booktitle = {ASIACRYPT 2007--Proceedings of the Advances in Cryptology 13th International Conference on Theory and Application of Cryptology and Information Security},
  organization = {Springer-Verlag},
  year = {2007},
  month = dec,
  address = {Kuching, Malaysia},
  pages = {200--215},
  publisher = {Springer-Verlag},
  series = {ASIACRYPT'07},
  abstract = {This paper describes the first identity-based broadcast encryption scheme (IBBE) with constant size ciphertexts and private keys. In our scheme, the public key is of size linear in the maximal size m of the set of receivers, which is smaller than the number of possible users (identities) in the system. Compared with a recent broadcast encryption system introduced by Boneh, Gentry and Waters (BGW), our system has comparable properties, but with a better efficiency: the public key is shorter than in BGW. Moreover, the total number of possible users in the system does not have to be fixed in the setup},
  www_section = {ciphertext, encryption, IBBE, private key},
  isbn = {3-540-76899-8, 978-3-540-76899-9},
  url = {http://dl.acm.org/citation.cfm?id=1781454.1781471},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASIACRYPT\%2707\%20-\%20IBBE\%20with\%20constant\%20size\%20ciphertexts\%20and\%20private\%20keys.pdf},
}
@booklet{Demers94thebayou,
  title = {The Bayou Architecture: Support for Data Sharing among Mobile Users},
  author = {Alan Demers and Karin Petersen and Mike Spreitzer and Douglas Terry and Marvin Theimer and Brent Welch},
  year = {1994},
  abstract = {The Bayou System is a platform of replicated, highly-available, variable-consistency, mobile databases on which to build collaborative applications. This paper presents the preliminary system architecture along with the design goals that influenced it. We take a fresh, bottom-up and critical look at the requirements of mobile computing applications and carefully pull together both new and existing techniques into an overall architecture that meets these requirements. Our emphasis is on supporting application-specific conflict detection and resolution and on providing application controlled inconsistency},
  www_section = {Unsorted},
  doi = {10.1109/WMCSA.1994.37},
  url = {http://portal.acm.org/citation.cfm?id=1440028},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.8955.pdf},
}
@inproceedings{Department01instrumentingthe,
  title = {Instrumenting The World With Wireless Sensor Networks},
  author = {Deborah Estrin and Gregory J. Pottie and L. Girod and Mani Srivastava},
  booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP 2001)},
  year = {2001},
  pages = {2033--2036},
  abstract = {Pervasive micro-sensing and actuation may revolutionize the way in which we understand and manage complex physical systems: from airplane wings to complex ecosystems. The capabilities for detailed physical monitoring and manipulation offer enormous opportunities for almost every scientific discipline, and it will alter the feasible granularity of engineering},
  www_section = {wireless sensor network},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.3027},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.3027.pdf},
}
@inproceedings{Deswarte91intrusiontolerance,
  title = {Intrusion Tolerance in Distributed Computing Systems},
  author = {Yves Deswarte and Laurent Blain and Jean-charles Fabre},
  booktitle = {Proceedings of the IEEE Symposium on Research in Security and Privacy},
  year = {1991},
  pages = {110--121},
  abstract = {An intrusion-tolerant distributed system is a system which is designed so that any intrusion into apart of the system will not endanger confidentiality, integrity and availability. This approach is suitable for distributed systems, because distribution enables isolation of elements so that an intrusion gives physical access to only a part of the system. By intrusion, we mean not only computer break-ins by non-registered people, but also attempts by registered users to exceed or to abuse their privileges. In particular, possible malice of security administrators is taken into account. This paper describes how some functions of distributed systems can be designed to tolerate intrusions, in particular security functions such as user authentication and authorization, and application functions such as file management},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.56.9968},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.9968.pdf},
  www_section = {Unsorted},
}
@inproceedings{Diaz02,
  title = {Towards measuring anonymity},
  author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel},
  booktitle = {Proceedings of Privacy Enhancing Technologies Workshop (PET 2002)},
  organization = {Springer-Verlag, LNCS 2482},
  year = {2002},
  month = apr,
  editor = {Roger Dingledine and Paul Syverson},
  publisher = {Springer-Verlag, LNCS 2482},
  abstract = {This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other},
  www_section = {anonymity, attack, privacy},
  isbn = {978-3-540-00565-0},
  doi = {10.1007/3-540-36467-6},
  url = {http://www.springerlink.com/content/3qb837jkpgukc6b5/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/article-89.pdf},
}
@booklet{Diaz02towardsmeasuring,
  title = {Towards Measuring Anonymity},
  author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel},
  year = {2002},
  publisher = {Springer-Verlag},
  abstract = {This paper introduces an information theoretic model that allows to quantify the degree of anonymity provided by schemes for anonymous connections. It considers attackers that obtain probabilistic information about users. The degree is based on the probabilities an attacker, after observing the system, assigns to the different users of the system as being the originators of a message. As a proof of concept, the model is applied to some existing systems. The model is shown to be very useful for evaluating the level of privacy a system provides under various attack scenarios, for measuring the amount of information an attacker gets with a particular attack and for comparing different systems amongst each other},
  url = {http://www.cosic.esat.kuleuven.be/publications/article-89.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/anonimity.pdf},
  internal-note = {near-duplicate of Diaz02; preprint version of the same paper},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@phdthesis{DiazThesis05,
  title = {Anonymity and Privacy in Electronic Services},
  author = {Claudia Diaz},
  school = {Katholieke Universiteit Leuven},
  year = {2005},
  month = dec,
  address = {Leuven, Belgium},
  type = {{PhD} thesis},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DiazThesis05.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@conference{Dimakis:2010:NCD:1861840.1861868, title = {Network coding for distributed storage systems}, author = {Dimakis, Alexandros G. and Godfrey, Brighten and Wu, Yunnan and Wainwright, Martin J. and Ramchandran, Kannan}, booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer Communications}, organization = {IEEE Press}, volume = {56}, year = {2007}, month = {May}, address = {Anchorage, Alaska, USA}, pages = {4539--4551}, publisher = {IEEE Press}, abstract = {Distributed storage systems provide reliable access to data through redundancy spread over individually unreliable nodes. Application scenarios include data centers, peer-to-peer storage systems, and storage in wireless networks. Storing data using an erasure code, in fragments spread across nodes, requires less redundancy than simple replication for the same level of reliability. However, since fragments must be periodically replaced as nodes fail, a key question is how to generate encoded fragments in a distributed way while transferring as little data as possible across the network. For an erasure coded system, a common practice to repair from a single node failure is for a new node to reconstruct the whole encoded data object to generate just one encoded block. We show that this procedure is sub-optimal. We introduce the notion of regenerating codes, which allow a new node to communicate functions of the stored data from the surviving nodes. We show that regenerating codes can significantly reduce the repair bandwidth. Further, we show that there is a fundamental tradeoff between storage and repair bandwidth which we theoretically characterize using flow arguments on an appropriately constructed graph. 
By invoking constructive results in network coding, we introduce regenerating codes that can achieve any point in this optimal tradeoff}, www_section = {distributed storage, network coding, peer-to-peer storage, Regenerating Codes}, issn = {0018-9448}, doi = {http://dx.doi.org/10.1109/TIT.2010.2054295}, url = {http://dx.doi.org/10.1109/TIT.2010.2054295}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20Network\%20coding\%20for\%20distributed\%20storage\%20systems.pdf}, }
@booklet{Dingledine01areputation,
  title = {A Reputation System to Increase MIX-net Reliability},
  author = {Roger Dingledine and Michael J. Freedman and David Hopwood and David Molnar},
  year = {2001},
  abstract = {We describe a design for a reputation system that increases the reliability and thus efficiency of remailer services. Our reputation system uses a MIX-net in which MIXes give receipts for intermediate messages. Together with a set of witnesses, these receipts allow senders to verify the correctness of each MIX and prove misbehavior to the witnesses},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.7912\&rep=rep1\&type=pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Dingledine02reliablemix,
  title = {Reliable MIX Cascade Networks through Reputation},
  author = {Roger Dingledine and Paul Syverson},
  booktitle = {Financial Cryptography},
  series = {Lecture Notes in Computer Science},
  volume = {2357},
  organization = {Springer Verlag},
  year = {2002},
  publisher = {Springer Verlag},
  abstract = {We describe a MIX cascade protocol and a reputation system that together increase the reliability of a network of MIX cascades. In our protocol, MIX nodes periodically generate a communally random seed that, along with their reputations, determines cascade configuration},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9316\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9316.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Dingledine03reputationin,
  title = {Reputation in P2P Anonymity Systems},
  author = {Roger Dingledine and Nick Mathewson and Paul Syverson},
  booktitle = {Workshop on Economics of Peer-to-Peer Systems},
  year = {2003},
  abstract = {Decentralized anonymity systems tend to be unreliable, because users must choose nodes in the network without knowing the entire state of the network. Reputation systems promise to improve reliability by predicting network state. In this paper we focus on anonymous remailers and anonymous publishing, explain why the systems can benefit from reputation, and describe our experiences designing reputation systems for them while still ensuring anonymity. We find that in each example we first must redesign the underlying anonymity system to support verifiable transactions},
  www_section = {anonymity, P2P, redundancy, remailer},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.4740},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.14.4740.pdf},
}
@inproceedings{Dischinger:2008:DBB:1452520.1452523,
  title = {Detecting BitTorrent Blocking},
  author = {Dischinger, Marcel and Mislove, Alan and Haeberlen, Andreas and P. Krishna Gummadi},
  booktitle = {IMC'08. Proceedings of the 8th ACM SIGCOMM conference on Internet measurement},
  organization = {ACM},
  year = {2008},
  month = oct,
  address = {Vouliagmeni, Greece},
  pages = {3--8},
  publisher = {ACM},
  series = {IMC '08},
  abstract = {Recently, it has been reported that certain access ISPs are surreptitiously blocking their customers from uploading data using the popular BitTorrent file-sharing protocol. The reports have sparked an intense and wide-ranging policy debate on network neutrality and ISP traffic management practices. However, to date, end users lack access to measurement tools that can detect whether their access ISPs are blocking their BitTorrent traffic. And since ISPs do not voluntarily disclose their traffic management policies, no one knows how widely BitTorrent traffic blocking is deployed in the current Internet. In this paper, we address this problem by designing an easy-to-use tool to detect BitTorrent blocking and by presenting results from a widely used public deployment of the tool},
  www_section = {BitTorrent, blocking, network measurement},
  isbn = {978-1-60558-334-1},
  doi = {10.1145/1452520.1452523},
  url = {http://doi.acm.org/10.1145/1452520.1452523},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2708\%20-\%20Detecting\%20BitTorrent\%20Blocking.pdf},
}
@inproceedings{DistributedSearch2014Hermann,
  title = {Censorship-Resistant and Privacy-Preserving Distributed Web Search},
  author = {Michael Herrmann and Ren Zhang and Kai-Chun Ning and Claudia Diaz},
  booktitle = {IEEE International Conference on Peer to Peer computing},
  year = {2014},
  abstract = {The vast majority of Internet users are relying on centralized search engine providers to conduct their web searches. However, search results can be censored and search queries can be recorded by these providers without the user's knowledge. Distributed web search engines based on peer-to-peer networks have been proposed to mitigate these threats. In this paper we analyze the three most popular real-world distributed web search engines: Faroo, Seeks and Yacy, with respect to their censorship resistance and privacy protection. We show that none of them provides an adequate level of protection against an adversary with modest resources. Recognizing these flaws, we identify security properties a censorship-resistant and privacy-preserving distributed web search engine should provide. We propose two novel defense mechanisms called node density protocol and webpage verification protocol to achieve censorship resistance and show their effectiveness and feasibility with simulations. Finally, we elaborate on how state-of-the-art defense mechanisms achieve privacy protection in distributed web search engines},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedSearch2014Hermann.pdf},
  www_section = {Unsorted},
  url = {https://bibliography.gnunet.org},
}
@inproceedings{Douceur01competitivehill-climbing,
  title = {Competitive Hill-Climbing Strategies for Replica Placement in a Distributed File System},
  author = {John R. Douceur and Roger Wattenhofer},
  booktitle = {DISC},
  year = {2001},
  pages = {48--62},
  abstract = {The Farsite distributed file system stores multiple replicas of files on multiple machines, to provide file access even when some machines are unavailable. Farsite assigns file replicas to machines so as to maximally exploit the different degrees of availability of different machines, given an allowable replication factor R. We use competitive analysis and simulation to study the performance of three candidate hillclimbing replica placement strategies, MinMax, MinRand, and RandRand, each of which successively exchanges the locations of two file replicas. We show that the MinRand and RandRand strategies are perfectly competitive for R = 2 and 2/3-competitive for R = 3. For general R, MinRand is at least 1/2-competitive and RandRand is at least 10/17-competitive. The MinMax strategy is not competitive. Simulation results show better performance than the theoretic worst-case bounds},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.6802\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hill\%20climbing.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????}
@inproceedings{Douceur:2002:RSD:850928.851884,
  title = {Reclaiming Space from Duplicate Files in a Serverless Distributed File System},
  author = {John R. Douceur and Adya, Atul and Bolosky, William J. and Simon, Dan and Marvin Theimer},
  booktitle = {ICDCS'02--Proceedings of the 22nd International Conference on Distributed Computing Systems (ICDCS'02)},
  organization = {IEEE Computer Society},
  year = {2002},
  month = jul,
  address = {Vienna, Austria},
  pages = {0--617},
  publisher = {IEEE Computer Society},
  series = {ICDCS '02},
  abstract = {The Farsite distributed file system provides availability by replicating each file onto multiple desktop computers. Since this replication consumes significant storage space, it is important to reclaim used space where possible. Measurement of over 500 desktop file systems shows that nearly half of all consumed space is occupied by duplicate files. We present a mechanism to reclaim space from this incidental duplication to make it available for controlled file replication. Our mechanism includes: (1) convergent encryption, which enables duplicate files to be coalesced into the space of a single file, even if the files are encrypted with different users' keys; and (2) SALAD, a Self-Arranging Lossy Associative Database for aggregating file content and location information in a decentralized, scalable, fault-tolerant manner. Large-scale simulation experiments show that the duplicate-file coalescing system is scalable, highly effective, and fault-tolerant},
  www_section = {convergent encryption, distributed file system, duplicate files, farsite, SALAD, serverless},
  isbn = {0-7695-1585-1},
  doi = {10.1109/ICDCS.2002.1022312},
  url = {http://dl.acm.org/citation.cfm?id=850928.851884},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2702\%20-\%20Reclaiming\%20space\%20for\%20duplicate\%20files.pdf},
}
@conference{Douceur:2002:SA:646334.687813,
  title        = {The Sybil Attack},
  author       = {John R. Douceur},
  booktitle    = {IPTPS'01--Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  organization = {Springer-Verlag London},
  year         = {2002},
  month        = {March},
  address      = {Cambridge, MA},
  pages        = {251--260},
  publisher    = {Springer-Verlag London},
  series       = {Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  abstract     = {Large-scale peer-to-peer systems face security threats from faulty or hostile remote computing elements. To resist these threats, many such systems employ redundancy. However, if a single faulty entity can present multiple identities, it can control a substantial fraction of the system, thereby undermining this redundancy. One approach to preventing these "Sybil attacks" is to have a trusted agency certify identities. This paper shows that, without a logically centralized authority, Sybil attacks are always possible except under extreme and unrealistic assumptions of resource parity and coordination among entities},
  www_section  = {attack, peer-to-peer networking, security threat, Sybil attack},
  isbn         = {3-540-44179-4},
  url          = {http://dl.acm.org/citation.cfm?id=646334.687813},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2702\%20-\%20Douceur\%20-\%20The\%20Sybil\%20Attack.pdf},
}
@inproceedings{Douceur:2002:SDS:784592.784803,
  title        = {A Secure Directory Service based on Exclusive Encryption},
  author       = {John R. Douceur and Adya, Atul and Benaloh, Josh and Bolosky, William J. and Yuval, Gideon},
  booktitle    = {ACSAC'02--Proceedings of the 18th Annual Computer Security Applications Conference},
  organization = {IEEE Computer Society},
  year         = {2002},
  month        = {December},
  address      = {San Diego, CA, USA},
  pages        = {0--172},
  publisher    = {IEEE Computer Society},
  series       = {ACSAC '02},
  abstract     = {We describe the design of a Windows file-system directory service that ensures the persistence, integrity, privacy, syntactic legality, and case-insensitive uniqueness of the names it indexes. Byzantine state replication provides persistence and integrity, and encryption imparts privacy. To enforce Windows' baroque name syntax--including restrictions on allowable characters, on the terminal character, and on several specific names--we develop a cryptographic process, called "exclusive encryption," that inherently excludes syntactically illegal names and that enables the exclusion of case-insensitively duplicate names without access to their plaintext. This process excludes entire names by mapping the set of allowed strings to the set of all strings, excludes certain characters through an amended prefix encoding, excludes terminal characters through varying the prefix coding by character index, and supports case-insensitive comparison of names by extracting and encrypting case information separately. We also address the issues of hiding name-length information and access-authorization information, and we report a newly discovered problem with enforcing case-insensitive uniqueness for Unicode names},
  www_section  = {directory service, encryption, exclusive encryption, Windows},
  isbn         = {0-7695-1828-1},
  doi          = {10.1109/CSAC.2002.1176289},
  url          = {http://dl.acm.org/citation.cfm?id=784592.784803},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2702\%20-\%20A\%20secure\%20directory\%20service\%20based\%20on\%20exclusive\%20encryption.pdf},
}
@phdthesis{Douglas-thesis,
  title       = {A taxonomy for and analysis of anonymous communications networks},
  author      = {Douglas Kelly},
  school      = {Air Force Institute of Technology},
  year        = {2009},
  month       = {March},
  type        = {phd},
  abstract    = {Any entity operating in cyberspace is susceptible to debilitating attacks. With cyber attacks intended to gather intelligence and disrupt communications rapidly replacing the threat of conventional and nuclear attacks, a new age of warfare is at hand. In 2003, the United States acknowledged that the speed and anonymity of cyber attacks makes distinguishing among the actions of terrorists, criminals, and nation states difficult. Even President Obama's Cybersecurity Chief-elect feels challenged by the increasing sophistication of cyber attacks. Indeed, the rising quantity and ubiquity of new surveillance technologies in cyberspace enables instant, undetectable, and unsolicited information collection about entities. Hence, anonymity and privacy are becoming increasingly important issues. Anonymization enables entities to protect their data and systems from a diverse set of cyber attacks and preserve privacy. This research provides a systematic analysis of anonymity degradation, preservation and elimination in cyberspace to enchance the security of information assets. This includes discovery/obfuscation of identities and actions of/from potential adversaries. First, novel taxonomies are developed for classifying and comparing the wide variety of well-established and state-of-the-art anonymous networking protocols. These expand the classical definition of anonymity and are the first known to capture the peer-to-peer and mobile ad hoc anonymous protocol family relationships. Second, a unique synthesis of state-of-the-art anonymity metrics is provided. This significantly aids an entities ability to reliably measure changing anonymity levels; thereby, increasing their ability to defend against cyber attacks. Finally, a novel epistemic-based model is created to characterize how an adversary reasons with knowledge to degrade anonymity},
  url         = {http://oai.dtic.mil/oai/oai?verb=getRecord\&metadataPrefix=html\&identifier=ADA495688},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Douglas-thesis.pdf},
  www_section = {Unsorted},
}
@inproceedings{Druschel01past:a,
  title       = {PAST: A large-scale, persistent peer-to-peer storage utility},
  author      = {Peter Druschel and Antony Rowstron},
  booktitle   = {HotOS VIII--Proceedings of the 8th Workshop on Hot Topics in Operating Systems},
  year        = {2001},
  pages       = {75--80},
  abstract    = {This paper sketches the design of PAST, a large-scale, Internet-based, global storage utility that provides scalability, high availability, persistence and security. PAST is a peer-to-peer Internet application and is entirely selforganizing. PAST nodes serve as access points for clients, participate in the routing of client requests, and contribute storage to the system. Nodes are not trusted, they may join the system at any time and may silently leave the system without warning. Yet, the system is able to provide strong assurances, efficient storage access, load balancing and scalability},
  www_section = {peer-to-peer storage},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.1674},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.1.1674.pdf},
}
@inproceedings{Duminuco:2009:PSR:1584339.1584602,
  title        = {A Practical Study of Regenerating Codes for Peer-to-Peer Backup Systems},
  author       = {Alessandro Duminuco and E W Biersack},
  booktitle    = {ICDCS'09--Proceedings of the 29th IEEE International Conference on Distributed Computing Systems},
  organization = {IEEE Computer Society},
  year         = {2009},
  month        = {June},
  address      = {Montreal, Qu{\'e}bec, Canada},
  pages        = {376--384},
  publisher    = {IEEE Computer Society},
  series       = {ICDCS '09},
  abstract     = {In distributed storage systems, erasure codes represent an attractive solution to add redundancy to stored data while limiting the storage overhead. They are able to provide the same reliability as replication requiring much less storage space. Erasure coding breaks the data into pieces that are encoded and then stored on different nodes. However, when storage nodes permanently abandon the system, new redundant pieces must be created. For erasure codes, generating a new piece requires the transmission of k pieces over the network, resulting in a k times higher reconstruction traffic as compared to replication. Dimakis proposed a new class of codes, called Regenerating Codes, which are able to provide both the storage efficiency of erasure codes and the communication efficiency of replication. However, Dimakis gave only a theoretical description of the codes without discussing implementation issues or computational costs. We have done a real implementation of Random Linear Regenerating Codes that allows us to measure their computational cost, which can be significant if the parameters are not chosen properly. However, we also find that there exist parameter values that result in a significant reduction of the communication overhead at the expense of a small increase in storage cost and computation, which makes these codes very attractive for distributed storage systems},
  www_section  = {Backup Systems, erasure codes, evaluation, peer-to-peer networking, Regenerating Codes, storage},
  isbn         = {978-0-7695-3659-0},
  doi          = {10.1109/ICDCS.2009.14},
  url          = {http://dx.doi.org/10.1109/ICDCS.2009.14},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2709\%20-\%20Regenerating\%20codes\%20for\%20p2p\%20backup\%20systems.pdf},
}
@inproceedings{Dwork2007,
  title       = {The Price of Privacy and the Limits of LP Decoding},
  author      = {Cynthia Dwork and Frank D. McSherry and Kunal Talwar},
  booktitle   = {STOC'07--Proceedings of the 39th Annual ACM Symposium on Theory of Computing},
  year        = {2007},
  pages       = {85--94},
  www_section = {Unsorted},
}
@article{EURECOM+2885,
  title       = {Long term study of peer behavior in the KAD DHT},
  author      = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack},
  journal     = {IEEE/ACM Transactions on Networking},
  volume      = {17},
  year        = {2009},
  month       = {May},
  pages       = {1371--1384},
  abstract    = {Distributed hash tables (DHTs) have been actively studied in literature and many different proposals have been made on how to organize peers in a DHT. However, very few DHTs have been implemented in real systems and deployed on a large scale. One exception is KAD, a DHT based on Kademlia, which is part of eDonkey, a peer-to-peer file sharing system with several million simultaneous users. We have been crawling a representative subset of KAD every five minutes for six months and obtained information about geographical distribution of peers, session times, daily usage, and peer lifetime. We have found that session times are Weibull distributed and we show how this information can be exploited to make the publishing mechanism much more efficient. Peers are identified by the so-called KAD ID, which up to now was assumed to be persistent. However, we observed that a fraction of peers changes their KAD ID as frequently as once a session. This change of KAD IDs makes it difficult to characterize end-user behavior. For this reason we have been crawling the entire KAD network once a day for more than a year to track end-users with static IP addresses, which allows us to estimate end-user lifetime and the fraction of end-users changing their KAD ID},
  www_section = {churn, distributed hash table, KAD, Kademlia},
  issn        = {1063-6692},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Long\%20Term\%20Study\%20of\%20Peer\%20Behavior\%20in\%20the\%20kad\%20DHT.pdf},
  url         = {https://bibliography.gnunet.org},
}
@inproceedings{EdmanSY07,
  title       = {A Combinatorial Approach to Measuring Anonymity},
  author      = {Matthew Edman and Fikret Sivrikaya and B{\"u}lent Yener},
  booktitle   = {Intelligence and Security Informatics, 2007 IEEE},
  year        = {2007},
  month       = {May},
  pages       = {356--363},
  abstract    = {In this paper we define a new metric for quantifying the degree of anonymity collectively afforded to users of an anonymous communication system. We show how our metric, based on the permanent of a matrix, can be useful in evaluating the amount of information needed by an observer to reveal the communication pattern as a whole. We also show how our model can be extended to include probabilistic information learned by an attacker about possible sender-recipient relationships. Our work is intended to serve as a complementary tool to existing information-theoretic metrics, which typically consider the anonymity of the system from the perspective of a single user or message},
  www_section = {anonymity},
  isbn        = {1-4244-1329-X},
  doi         = {10.1109/ISI.2007.379497},
  url         = {http://www.mendeley.com/research/a-combinatorial-approach-to-measuring-anonymity/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanSY07.pdf},
}
@inproceedings{ElGamal:1985:PKC:19478.19480,
  title        = {A Public Key Cryptosystem and a Signature Scheme Based on Discrete Logarithms},
  author       = {El Gamal, Taher},
  booktitle    = {Proceedings of CRYPTO 84 on Advances in cryptology},
  organization = {Springer-Verlag New York, Inc},
  year         = {1985},
  month        = {January},
  address      = {Santa Barbara, California},
  pages        = {10--18},
  publisher    = {Springer-Verlag New York, Inc},
  abstract     = {A new signature scheme is proposed together with an implementation of the Diffie--Hellman key distribution scheme that achieves a public key cryptosystem. The security of both systems relies on the difficulty of computing discrete logarithms over finite fields},
  www_section  = {cryptosystem, discrete logarithms, public key, signature scheme},
  isbn         = {0-387-15658-5},
  url          = {http://dl.acm.org/citation.cfm?id=19478.19480},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2784\%20-\%20El\%20Gamal\%20-\%20Public\%20Key\%20Cryptosystem.pdf},
}
@inproceedings{Electrical04designingincentives,
  title        = {Designing Incentives for Peer-to-Peer Routing},
  author       = {Alberto Blanc and Yi-Kai Liu and Vahdat, Amin},
  booktitle    = {INFOCOM 2005, 24th Annual Joint Conference of the IEEE Computer and Communications Societies},
  organization = {IEEE Computer Society},
  volume       = {1},
  year         = {2005},
  month        = {March},
  address      = {Miami, FL, USA},
  pages        = {374--385},
  publisher    = {IEEE Computer Society},
  abstract     = {In a peer-to-peer network, nodes are typically required to route packets for each other. This leads to a problem of "free-loaders", nodes that use the network but refuse to route other nodes' packets. In this paper we study ways of designing incentives to discourage free-loading. We model the interactions between nodes as a "random matching game", and describe a simple reputation system that provides incentives for good behavior. Under certain assumptions, we obtain a stable subgame-perfect equilibrium. We use simulations to investigate the robustness of this scheme in the presence of noise and malicious nodes, and we examine some of the design trade-offs. We also evaluate some possible adversarial strategies, and discuss how our results might apply to real peer-to-peer systems},
  www_section  = {economics, free-loader, free-loading, peer-to-peer networking, system design},
  issn         = {0743-166X},
  isbn         = {0-7803-8968-9},
  doi          = {10.1109/INFCOM.2005.1497907},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Designing\%20incentives\%20for\%20peer-to-peer\%20routing.pdf},
}
@conference{Eppstein:2011:WDE:2018436.2018462,
  title        = {What's the difference?: efficient set reconciliation without prior context},
  author       = {Eppstein, David and Goodrich, Michael T. and Uyeda, Frank and Varghese, George},
  booktitle    = {Proceedings of the ACM SIGCOMM 2011 conference},
  organization = {ACM},
  year         = {2011},
  address      = {New York, NY, USA},
  pages        = {218--229},
  publisher    = {ACM},
  series       = {SIGCOMM '11},
  www_section  = {difference digest, GNUnet, invertible bloom filter, set difference},
  isbn         = {978-1-4503-0797-0},
  doi          = {10.1145/2018436.2018462},
  url          = {http://doi.acm.org/10.1145/2018436.2018462},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/EppGooUye-SIGCOMM-11.pdf},
}
@inproceedings{Eschenauer02akey-management,
  title        = {A Key-Management Scheme for Distributed Sensor Networks},
  author       = {Laurent Eschenauer and Virgil D. Gligor},
  booktitle    = {Proceedings of the 9th ACM Conference on Computer and Communications Security},
  organization = {ACM Press},
  year         = {2002},
  pages        = {41--47},
  publisher    = {ACM Press},
  abstract     = {Distributed Sensor Networks (DSNs) are ad-hoc mobile networks that include sensor nodes with limited computation and communication capabilities. DSNs are dynamic in the sense that they allow addition and deletion of sensor nodes after deployment to grow the network or replace failing and unreliable nodes. DSNs may be deployed in hostile areas where communication is monitored and nodes are subject to capture and surreptitious use by an adversary. Hence DSNs require cryptographic protection of communications, sensorcapture detection, key revocation and sensor disabling. In this paper, we present a key-management scheme designed to satisfy both operational and security requirements of DSNs},
  www_section  = {DNS, mobile Ad-hoc networks},
  url          = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.9193},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9193.pdf},
}
@article{Eugster:2003:LPB:945506.945507,
  title       = {Lightweight probabilistic broadcast},
  author      = {Patrick Eugster and Rachid Guerraoui and Sidath B. Handurukande and Petr Kouznetsov and Anne-Marie Kermarrec},
  journal     = {ACM Transactions on Computer Systems},
  volume      = {21},
  year        = {2003},
  month       = {November},
  address     = {New York, NY, USA},
  pages       = {341--374},
  publisher   = {ACM},
  www_section = {Broadcast, buffering, garbage collection, gossip, noise, randomization, reliability, scalability},
  issn        = {0734-2071},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lightweight_prob_broadcast.pdf},
}
@inproceedings{Fakult04peerstore:better,
  title       = {PeerStore: Better Performance by Relaxing in Peer-to-Peer Backup},
  author      = {Martin Landers and Han Zhang and Kian-Lee Tan},
  booktitle   = {P2P'04--Proceedings of the 4th International Conference on Peer-to-Peer Computing},
  year        = {2004},
  abstract    = {Backup is cumbersome. To be effective, backups have to be made at regular intervals, forcing users to organize and store a growing collection of backup media. In this paper we propose a novel Peer-to-Peer backup system, PeerStore, that allows the user to store his backups on other people's computers instead. PeerStore is an adaptive, cost-effective system suitable for all types of networks ranging from LAN, WAN to large unstable networks like the Internet. The system consists of two layers: metadata layer and symmetric trading layer. Locating blocks and duplicate checking is accomplished by the metadata layer while the actual data distribution is done between pairs of peers after they have established a symmetric data trade. By decoupling the metadata management from data storage, the system offers a significant reduction of the maintenance cost and preserves fairness among peers. Results show that PeerStore has a reduced maintenance cost comparing to pStore. PeerStore also realizes fairness because of the symmetric nature of the trades},
  www_section = {backup, P2P},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.8067},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/peerstore-better-performance-by.pdf},
}
@inproceedings{Fargier:1996:MCS:1892875.1892901,
  title        = {Mixed constraint satisfaction: a framework for decision problems under incomplete knowledge},
  author       = {Fargier, H{\'e}l{\`e}ne and Lang, J{\'e}r{\^o}me and Schiex, Thomas},
  booktitle    = {AAAI'96--Proceedings of the 13th National Conference on Artificial Intelligence},
  organization = {AAAI Press},
  year         = {1996},
  month        = {August},
  address      = {Portland, OR, United States},
  pages        = {175--180},
  publisher    = {AAAI Press},
  series       = {AAAI'96},
  abstract     = {Constraint satisfaction is a powerful tool for representing and solving decision problems with complete knowledge about the world. We extend the CSP framework so as to represent decision problems under incomplete knowledge. The basis of the extension consists in a distinction between controllable and uncontrollable variables -- hence the terminology "mixed CSP" -- and a "solution" gives actually a conditional decision. We study the complexity of deciding the consistency of a mixed CSP. As the problem is generally intractable, we propose an algorithm for finding an approximate solution},
  www_section  = {algorithms, constraint satisfaction, decision problem, framework, incomplete knowledge, mixed CSP},
  isbn         = {0-262-51091-X},
  url          = {http://dl.acm.org/citation.cfm?id=1892875.1892901},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/AAAI\%2796\%20-\%20Mixed\%20constraint\%20satisfaction.pdf},
}
@inproceedings{Feamster02infranet:circumventing,
  title        = {Infranet: Circumventing Web Censorship and Surveillance},
  author       = {Nick Feamster and Magdalena Balazinska and Greg Harfst and Hari Balakrishnan and David Karger},
  booktitle    = {Proceedings of the 11th USENIX Security Symposium},
  organization = {USENIX Association},
  year         = {2002},
  pages        = {247--262},
  publisher    = {USENIX Association},
  abstract     = {An increasing number of countries and companies routinely block or monitor access to parts of the Internet. To counteract these measures, we propose Infranet, a system that enables clients to surreptitiously retrieve sensitive content via cooperating Web servers distributed across the global Internet. These Infranet servers provide clients access to censored sites while continuing to host normal uncensored content. Infranet uses a tunnel protocol that provides a covert communication channel between its clients and servers, modulated over standard HTTP transactions that resemble innocuous Web browsing. In the upstream direction, Infranet clients send covert messages to Infranet servers by associating meaning to the sequence of HTTP requests being made. In the downstream direction, Infranet servers return content by hiding censored data in uncensored images using steganographic techniques. We describe the design, a prototype implementation, security properties, and performance of Infranet. Our security analysis shows that Infranet can successfully circumvent several sophisticated censoring techniques},
  url          = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.18.5049\&rep=rep1\&type=pdf},
  www_section  = {Unsorted},
}
@inproceedings{Feigenbaum:2002:DAM:570810.570812,
  title        = {Distributed algorithmic mechanism design: recent results and future directions},
  author       = {Feigenbaum, Joan and S Shenker},
  booktitle    = {DIALM'02. Proceedings of the 6th international workshop on Discrete algorithms and methods for mobile computing and communications},
  organization = {ACM},
  year         = {2002},
  month        = {September},
  address      = {Atlanta, Georgia},
  pages        = {1--13},
  publisher    = {ACM},
  series       = {DIALM '02},
  abstract     = {Distributed Algorithmic Mechanism Design (DAMD) combines theoretical computer science's traditional focus on computational tractability with its more recent interest in incentive compatibility and distributed computing. The Internet's decentralized nature, in which distributed computation and autonomous agents prevail, makes DAMD a very natural approach for many Internet problems. This paper first outlines the basics of DAMD and then reviews previous DAMD results on multicast cost sharing and interdomain routing. The remainder of the paper describes several promising research directions and poses some specific open problems},
  www_section  = {algorithmic mechanism design, algorithms, distributed computation, multicast, routing},
  isbn         = {1-58113-587-4},
  doi          = {10.1145/570810.570812},
  url          = {http://jmvidal.cse.sc.edu/library/feigenbaum02a.pdf},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/DIALM\%2702\%20-\%20Feigenbaum\%20\%26\%20Shenker\%20-\%20Distributed\%20algorithmic\%20mechanism\%20design.pdf},
}
@inproceedings{Feigenbaum:2006:IIR:1134707.1134722,
  title        = {Incentive-compatible interdomain routing},
  author       = {Feigenbaum, Joan and Ramachandran, Vijay and Schapira, Michael},
  booktitle    = {EC'06. Proceedings of the 7th ACM Conference on Electronic Commerce},
  organization = {ACM},
  year         = {2006},
  month        = {June},
  address      = {Ann Arbor, Michigan},
  pages        = {130--139},
  publisher    = {ACM},
  series       = {EC '06},
  abstract     = {The routing of traffic between Internet domains, or Autonomous Systems (ASes), a task known as interdomain routing, is currently handled by the Border Gateway Protocol (BGP). Using BGP, autonomous systems can apply semantically rich routing policies to choose interdomain routes in a distributed fashion. This expressiveness in routing-policy choice supports domains' autonomy in network operations and in business decisions, but it comes at a price: The interaction of locally defined routing policies can lead to unexpected global anomalies, including route oscillations or overall protocol divergence. Networking researchers have addressed this problem by devising constraints on policies that guarantee BGP convergence without unduly limiting expressiveness and autonomy.In addition to taking this engineering or "protocol-design" approach, researchers have approached interdomain routing from an economic or "mechanism-design" point of view. It is known that lowest-cost-path (LCP) routing can be implemented in a truthful, BGP-compatible manner but that several other natural classes of routing policies cannot. In this paper, we present a natural class of interdomain-routing policies that is more realistic than LCP routing and admits incentive-compatible, BGP-compatible implementation. We also present several positive steps toward a general theory of incentive-compatible interdomain routing},
  www_section  = {border gateway protocol (BGP), distributed algorithmic mechanism design, interdomain routing},
  isbn         = {1-59593-236-4},
  doi          = {10.1145/1134707.1134722},
  url          = {http://doi.acm.org/10.1145/1134707.1134722},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2706\%20-\%20Incentive-compatible\%20interdomain\%20routing.pdf},
}
@conference{Feldman03quantifyingdisincentives,
  title       = {Quantifying Disincentives in Peer-to-Peer Networks},
  author      = {Michal Feldman and Kevin Lai and John Chuang and Ion Stoica},
  booktitle   = {Workshop on Economics of Peer-to-Peer Systems},
  year        = {2003},
  month       = {June},
  address     = {Berkeley, CA},
  abstract    = {In this paper, we use modeling and simulation to better understand the effects of cooperation on user performance and to quantify the performance-based disincentives in a peer-to-peer file sharing system. This is the first step towards building an incentive system. For the models developed in this paper, we have the following results: Although performance improves significantly when cooperation increases from low to moderate levels, the improvement diminishes thereafter. In particular, the mean delay to download a file when 5\% of the nodes share files is 8x more than when 40\% of the nodes share files, while the mean download delay when 40\% of the nodes share is only 1.75x more than when 100\% share},
  www_section = {incentives, peer-to-peer networking},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Feldman\%2C\%20Lai\%2C\%20Chuang\%20\%26\%20Stoica\%20-\%20Quantifying\%20disincentives\%20in\%20peer-to-peer\%20networks.pdf},
}
@inproceedings{Feldman:2004:FWP:1016527.1016539,
  title        = {Free-riding and whitewashing in peer-to-peer systems},
  author       = {Michal Feldman and Papadimitriou, Christos and John Chuang and Ion Stoica},
  booktitle    = {PINS'04. Proceedings of the ACM SIGCOMM Workshop on Practice and Theory of Incentives in Networked Systems},
  organization = {ACM},
  year         = {2004},
  month        = {August},
  address      = {Portland, OR},
  pages        = {228--236},
  publisher    = {ACM},
  series       = {PINS '04},
  abstract     = {We develop a model to study the phenomenon of free-riding in peer-to-peer (P2P) systems. At the heart of our model is a user of a certain type, an intrinsic and private parameter that reflects the user's willingness to contribute resources to the system. A user decides whether to contribute or free-ride based on how the current contribution cost in the system compares to her type. When the societal generosity (i.e., the average type) is low, intervention is required in order to sustain the system. We present the effect of mechanisms that exclude low type users or, more realistic, penalize free-riders with degraded service. We also consider dynamic scenarios with arrivals and departures of users, and with whitewashers: users who leave the system and rejoin with new identities to avoid reputational penalties. We find that when penalty is imposed on all newcomers in order to avoid whitewashing, system performance degrades significantly only when the turnover rate among users is high},
  www_section  = {cheap pseudonyms, cooperation, equilibrium, exclusion, free-riding, identity cost, incentives, peer-to-peer networking, whitewashing},
  isbn         = {1-58113-942-X},
  doi          = {10.1145/1016527.1016539},
  url          = {http://doi.acm.org/10.1145/1016527.1016539},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/PINS\%2704\%20-\%20\%20Free-riding\%20and\%20whitewashing\%20in\%20P2P\%20systems.pdf},
}
@inproceedings{Feldman:2004:RIT:988772.988788,
  title        = {Robust incentive techniques for peer-to-peer networks},
  author       = {Michal Feldman and Kevin Lai and Ion Stoica and John Chuang},
  booktitle    = {EC'04. Proceedings of the 5th ACM Conference on Electronic Commerce},
  organization = {ACM},
  year         = {2004},
  month        = {May},
  address      = {New York, NY, USA},
  pages        = {102--111},
  publisher    = {ACM},
  series       = {EC '04},
  abstract     = {Lack of cooperation (free riding) is one of the key problems that confronts today's P2P systems. What makes this problem particularly difficult is the unique set of challenges that P2P systems pose: large populations, high turnover, a symmetry of interest, collusion, zero-cost identities, and traitors. To tackle these challenges we model the P2P system using the Generalized Prisoner's Dilemma (GPD),and propose the Reciprocative decision function as the basis of a family of incentives techniques. These techniques are fullydistributed and include: discriminating server selection, maxflow-based subjective reputation, and adaptive stranger policies. Through simulation, we show that these techniques can drive a system of strategic users to nearly optimal levels of cooperation},
  www_section  = {cheap pseudonyms, collusion, free-riding, incentives, peer-to-peer networking, prisoners dilemma, reputation, whitewash, whitewashing},
  isbn         = {1-58113-771-0},
  doi          = {10.1145/988772.988788},
  url          = {http://doi.acm.org/10.1145/988772.988788},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2704\%20-\%20Robust\%20incentive\%20techniques\%20for\%20P2P\%20networks.pdf},
}
@inproceedings{Feldman:2005:HMR:1064009.1064022,
  title        = {Hidden-action in multi-hop routing},
  author       = {Michal Feldman and John Chuang and Ion Stoica and S Shenker},
  booktitle    = {EC'05. Proceedings of the 6th ACM Conference on Electronic Commerce},
  organization = {ACM},
  year         = {2005},
  month        = {June},
  address      = {Vancouver, Canada},
  pages        = {117--126},
  publisher    = {ACM},
  series       = {EC '05},
  abstract     = {In multi-hop networks, the actions taken by individual intermediate nodes are typically hidden from the communicating endpoints; all the endpoints can observe is whether or not the end-to-end transmission was successful. Therefore, in the absence of incentives to the contrary, rational (i.e., selfish) intermediate nodes may choose to forward packets at a low priority or simply not forward packets at all. Using a principal-agent model, we show how the hidden-action problem can be overcome through appropriate design of contracts, in both the direct (the endpoints contract with each individual router) and recursive (each router contracts with the next downstream router) cases. We further demonstrate that per-hop monitoring does not necessarily improve the utility of the principal or the social welfare in the system. In addition, we generalize existing mechanisms that deal with hidden-information to handle scenarios involving both hidden-information and hidden-action},
  www_section  = {contracts, hidden-action, incentives, mechanism design, moral-hazard, multi-hop, principal-agent model, routing},
  isbn         = {1-59593-049-3},
  doi          = {10.1145/1064009.1064022},
  url          = {http://doi.acm.org/10.1145/1064009.1064022},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2705\%20-\%20Hidden-action\%20in\%20multi-hop\%20routing.pdf},
}
@article{Feldman:2005:OFB:1120717.1120723, title = {Overcoming free-riding behavior in peer-to-peer systems}, author = {Michal Feldman and John Chuang}, journal = {ACM SIGecom Exchanges}, volume = {5}, year = {2005}, month = {July}, address = {New York, NY, USA}, pages = {41--50}, publisher = {ACM}, abstract = {While the fundamental premise of peer-to-peer (P2P) systems is that of voluntary resource sharing among individual peers, there is an inherent tension between individual rationality and collective welfare that threatens the viability of these systems. This paper surveys recent research at the intersection of economics and computer science that targets the design of distributed systems consisting of rational participants with diverse and selfish interests. In particular, we discuss major findings and open questions related to free-riding in P2P systems: factors affecting the degree of free-riding, incentive mechanisms to encourage user cooperation, and challenges in the design of incentive mechanisms for P2P systems}, www_section = {algorithms, cooperation, design, economics, game-theory, hidden-action, hidden-information, incentives, peer-to-peer networking, performance, reliability}, issn = {1551-9031}, doi = {http://doi.acm.org/10.1145/1120717.1120723}, url = {http://doi.acm.org/10.1145/1120717.1120723}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGecom\%20Exch.\%20\%285\%29\%20-\%20Overcoming\%20free-riding\%20behavior.pdf}, }
@conference{FessiIPTComm2010, title = {Pr2-P2PSIP: Privacy Preserving P2P Signaling for VoIP and IM}, author = {Fessi, Ali and Nathan S Evans and Heiko Niedermayer and Ralph Holz}, booktitle = {Principles, Systems and Applications of IP Telecommunications (IPTComm), Munich}, year = {2010}, month = {August}, address = {Munich, Germany}, pages = {141--152}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fessi_iptcomm_2010.pdf}, url = {https://bibliography.gnunet.org}, www_section = {Unsorted}, }
@booklet{Fiat02censorshipresistant, title = {Censorship Resistant Peer-to-Peer Content Addressable Networks}, author = {Amos Fiat and Jared Saia}, year = {2002}, abstract = {We present a censorship resistant peer-to-peer network for accessing n data items in a network of n nodes. Each search for a data item in the network takes O(log n) time and requires at most O(log2n) messages. Our network is censorship resistant in the sense that even after adversarial removal of an arbitrarily large constant fraction of the nodes in the network, all but an arbitrarily small fraction of the remaining nodes can obtain all but an arbitrarily small fraction of the original data items. The network can be created in a fully distributed fashion. It requires only O(log n) memory in each node. We also give a variant of our scheme that has the property that it is highly spam resistant: an adversary can take over complete control of a constant fraction of the nodes in the network and yet will still be unable to generate spam}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.4761\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.4761.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Fiat05makingchord, title = {Making chord robust to byzantine attacks}, author = {Amos Fiat and Jared Saia and Maxwell Young}, booktitle = {In Proc. of the European Symposium on Algorithms (ESA)}, organization = {Springer}, year = {2005}, pages = {803--814}, publisher = {Springer}, abstract = {Chord is a distributed hash table (DHT) that requires only O(log n) links per node and performs searches with latency and message cost O(log n), where n is the number of peers in the network. Chord assumes all nodes behave according to protocol. We give a variant of Chord which is robust with high probability for any time period during which: 1) there are always at least z total peers in the network for some integer z; 2) there are never more than (1/4--{\epsilon})z Byzantine peers in the network for a fixed {\epsilon} > 0; and 3) the number of peer insertion and deletion events is no more than zk for some tunable parameter k. We assume there is an adversary controlling the Byzantine peers and that the IP-addresses of all the Byzantine peers and the locations where they join the network are carefully selected by this adversary. Our notion of robustness is rather strong in that we not only guarantee that searches can be performed but also that we can enforce any set of {\textquotedblleft}proper behavior{\textquotedblright} such as contributing new material, etc. In comparison to Chord, the resources required by this new variant are only a polylogarithmic factor greater in communication, messaging, and linking costs}, www_section = {Chord, distributed hash table, robustness}, doi = {10.1007/11561071}, url = {http://www.springerlink.com/content/422llxn7khwej72n/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swarm.pdf}, }
@conference{Fitzi:2006:OEM:1146381.1146407, title = {Optimally efficient multi-valued byzantine agreement}, author = {Fitzi, Matthias and Hirt, Martin}, booktitle = {Proceedings of the twenty-fifth annual ACM symposium on Principles of distributed computing}, organization = {ACM}, year = {2006}, address = {New York, NY, USA}, pages = {163--168}, publisher = {ACM}, series = {PODC '06}, www_section = {byzantine agreement, communication complexity, cryptographic security, information-theoretic security}, isbn = {1-59593-384-0}, doi = {10.1145/1146381.1146407}, url = {http://doi.acm.org/10.1145/1146381.1146407}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FitHir06.pdf}, }
@article{Fragouli:2006:NCI:1111322.1111337, title = {Network Coding: an Instant Primer}, author = {Fragouli, Christina and Jean-Yves Le Boudec and J{\"o}rg Widmer}, journal = {SIGCOMM Computer Communication Review}, volume = {36}, year = {2006}, month = {January}, address = {New York, NY, USA}, pages = {63--68}, publisher = {ACM}, abstract = {Network coding is a new research area that may have interesting applications in practical networking systems. With network coding, intermediate nodes may send out packets that are linear combinations of previously received information. There are two main benefits of this approach: potential throughput improvements and a high degree of robustness. Robustness translates into loss resilience and facilitates the design of simple distributed algorithms that perform well, even if decisions are based only on partial information. This paper is an instant primer on network coding: we explain what network coding does and how it does it. We also discuss the implications of theoretical results on network coding for realistic settings and show how network coding can be used in practice}, www_section = {network coding}, issn = {0146-4833}, doi = {http://doi.acm.org/10.1145/1111322.1111337}, url = {http://doi.acm.org/10.1145/1111322.1111337}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev\%20-\%20Network\%20Coding\%3A\%20an\%20Instant\%20Primer.pdf}, }
@booklet{Freedman02introducingtarzan, title = {Introducing Tarzan, a Peer-to-Peer Anonymizing Network Layer}, author = {Michael J. Freedman and Emil Sit and Josh Cates and Robert Morris}, journal = {Revised Papers from the First International Workshop on Peer-to-Peer Systems}, volume = {Vol. 2429}, year = {2002}, pages = {121--129}, abstract = {We introduce Tarzan, a peer-to-peer anonymous network layer that provides generic IP forwarding. Unlike prior anonymizing layers, Tarzan is flexible, transparent, decentralized, and highly scalable. Tarzan achieves these properties by building anonymous IP tunnels between an open-ended set of peers. Tarzan can provide anonymity to existing applications, such as web browsing and file sharing, without change to those applications. Performance tests show that Tarzan imposes minimal overhead over a corresponding non-anonymous overlay route}, isbn = {3-540-44179-4}, url = {http://www.cs.rice.edu/Conferences/IPTPS02/182.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tarzan.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Freedman03sloppyhashing, title = {Sloppy Hashing and Self-Organizing Clusters}, author = {Michael J. Freedman and David Mazi{\`e}res}, journal = {In IPTPS}, volume = {Volume 2735/2003}, year = {2003}, pages = {45--55}, publisher = {Springer Berlin / Heidelberg}, abstract = {We are building Coral, a peer-to-peer content distribution system. Coral creates self-organizing clusters of nodes that fetch information from each other to avoid communicating with more distant or heavily-loaded servers. Coral indexes data, but does not store it. The actual content resides where it is used, such as in nodes' local web caches. Thus, replication happens exactly in proportion to demand}, isbn = {978-3-540-40724-9}, url = {http://www.coralcdn.org/docs/coral-iptps03.ps}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coral-iptps03.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Fu:2002:FSD:505452.505453, title = {Fast and secure distributed read-only file system}, author = {Kevin Fu and Frans M. Kaashoek and David Mazi{\`e}res}, booktitle = {OSDI 2000--Proceedings of the 4th USENIX Symposium on Operating Systems Design and Implementation}, organization = {ACM}, volume = {20}, year = {2002}, month = {October}, address = {San Diego, CA, USA}, pages = {1--24}, publisher = {ACM}, abstract = {Internet users increasingly rely on publicly available data for everything from software installation to investment decisions. Unfortunately, the vast majority of public content on the Internet comes with no integrity or authenticity guarantees. This paper presents the self-certifying read-only file system, a content distribution system providing secure, scalable access to public, read-only data. The read-only file system makes the security of published content independent from that of the distribution infrastructure. In a secure area (perhaps off-line), a publisher creates a digitally-signed database out of a file system's contents. The publisher then replicates the database on untrusted content-distribution servers, allowing for high availability. The read-only file system protocol furthermore pushes the cryptographic cost of content verification entirely onto clients, allowing servers to scale to a large number of clients. Measurements of an implementation show that an individual server running on a 550 Mhz Pentium III with FreeBSD can support 1,012 connections per second and 300 concurrent clients compiling a large software package}, www_section = {file systems, read-only, security}, issn = {0734-2071}, doi = {http://doi.acm.org/10.1145/505452.505453}, url = {http://doi.acm.org/10.1145/505452.505453}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Fast\%20and\%20Secure\%20Distributed\%20Read-Only\%20File\%20System.pdf}, }
@conference{Fu::FlowMarking::2005, title = {On Flow Marking Attacks in Wireless Anonymous Communication Networks}, author = {Xinwen Fu and Ye Zhu and Bryan Graham and Riccardo Bettati and Wei Zhao}, booktitle = {Proceedings of the IEEE International Conference on Distributed Computing Systems (ICDCS)}, organization = {IEEE Computer Society Washington, DC, USA}, year = {2005}, month = {April}, publisher = {IEEE Computer Society Washington, DC, USA}, abstract = {This paper studies the degradation of anonymity in a flow-based wireless mix network under flow marking attacks, in which an adversary embeds a recognizable pattern of marks into wireless traffic flows by electromagnetic interference. We find that traditional mix technologies are not effective in defeating flow marking attacks, and it may take an adversary only a few seconds to recognize the communication relationship between hosts by tracking such artificial marks. Flow marking attacks utilize frequency domain analytical techniques and convert time domain marks into invariant feature frequencies. To counter flow marking attacks, we propose a new countermeasure based on digital filtering technology, and show that this filter-based counter-measure can effectively defend a wireless mix network from flow marking attacks}, www_section = {802.11, anonymity, Bluetooth, flow marking attack}, isbn = {0-7695-2331-5}, url = {http://portal.acm.org/citation.cfm?id=1069397}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fu--FlowMarking--2005.pdf}, }
@conference{Fuhrmann03resultson, title = {Results on the practical feasibility of programmable network services}, author = {Thomas Fuhrmann and Till Harbaum and Panos Kassianidis and Marcus Schoeller and Martina Zitterbart}, booktitle = {In 2nd International Workshop on Active Network Technologies and Applications (ANTA)}, year = {2003}, abstract = {Active and programmable networks have been subject to intensive and successful research activities during the last couple of years. Many ideas and concepts have been pursued. However, only a few prototype implementations that have been developed so far, can deal with different applications in a larger scale setting. Moreover, detailed performance analyses of such prototypes are greatly missing today. Therefore, this paper does not present yet another architecture for active and programmable networks. In contrast, it rather focuses on the performance evaluation of the so-called AMnet approach that has already been presented previously [1]. As such, the paper demonstrates that an operational high-performance programmable network system with AAA (authentication, authorization, and accounting) security functionality will in fact be feasible in the near future}, www_section = {programmable networks}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.3074}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03performance.pdf}, }
@conference{Fuhrmann05scalablerouting, title = {Scalable routing for networked sensors and actuators}, author = {Thomas Fuhrmann}, booktitle = {In Proceedings of the Second Annual IEEE Communications Society Conference on Sensor and Ad Hoc Communications and Networks}, year = {2005}, abstract = {The design of efficient routing protocols for ad hoc and sensor networks is challenging for several reasons: Physical network topology is random. Nodes have limited computation and memory capabilities. Energy and bisection bandwidth are scarce. Furthermore, in most settings, the lack of centralized components leaves all network control tasks to the nodes acting as decentralized peers. In this paper, we present a novel routing algorithm, scalable source routing (SSR), which is capable of memory and message efficient routing in large random networks. A guiding example is a community of 'digital homes ' where smart sensors and actuators are installed by laypersons. Such networks combine wireless ad-hoc and infrastructure networks, and lack a well-crafted network topology. Typically, the nodes do not have sufficient processing and memory resources to perform sophisticated routing algorithms. Flooding on the other hand is too bandwidth-consuming in the envisaged large-scale networks. SSR is a fully self-organizing routing protocol for such scenarios. It creates a virtual ring that links all nodes via predecessor/successor source routes. Additionally, each node possesses O(log N) short-cut source routes to nodes in exponentially increasing virtual ring distance. Like with the Chord overlay network, this ensures full connectivity within the network. Moreover, it provides a routing semantic which can efficiently support indirection schemes like i3. 
Memory and message efficiency are achieved by the introduction of a route cache together with a set of path manipulation rules that allow to produce near-to-optimal paths}, www_section = {scalable source routing, sensor networks, wireless sensor network}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.6509}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.67.6509.pdf}, }
@booklet{Fuhrmann_anode, title = {A Node Evaluation Mechanism for Service Setup in}, author = {Thomas Fuhrmann and Marcus Schoeller and Christina Schmidt and Martina Zitterbart}, year = {2003}, abstract = {AMnet is a programmable network that aims at the flexible and rapid creation of services within an IP network. Examples for typical services include network layer enhancements e.g. for multicast and mobility, transport layer enhancements e.g. to integrate wireless LANs, and various application layer services e.g. for media transcoding and content distribution. AMnet is based on regular Linux boxes that run an execution environment (EE), a resource monitor, and a basic signaling-engine. These so-called active nodes run the services and provide support for resource-management and module-relocation. Services are created by service modules, small pieces of code, that are executed within the EE. Based on the standard netfilter mechanism of Linux, service modules have full access to the network traffic passing through the active node. This paper describes the evaluation mechanism for service setup in AMnet. In order to determine where a service module can be started, service modules are accompanied by evaluation modules. This allows service module authors to implement various customized strategies for node-selection and service setup. Examples that are supported by the AMnet evaluation mechanism are a) service setup at a fixed position, e.g. as gateway, b) along a fixed path (with variable position along that path), c) at variable positions inside the network with preferences for certain constellations, or d) at an unspecified position, e.g. for modification of multicasted traffic. The required path information is gathered by the AMnodes present in the network. 
By interaction with the resource monitors of the AMnodes and the service module repository of the respective administrative domain, the AMnet evaluation also ensures overall system security and stability}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.8749}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03evaluation.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Fuhrmann_aplatform, title = {A platform for lab exercises in sensor networks}, author = {Thomas Fuhrmann and Till Harbaum}, year = {2005}, abstract = {Programming of and experiences with sensor network nodes are about to enter the curricula of technical universities. Often however, practical obstacles complicate the implementation of a didactic concept. In this paper we present our approach that uses a Java virtual machine to decouple experiments with algorithm and protocol concepts from the odds of embedded system programming. This concept enables students to load Java classes via an SD-card into a sensor node. An LC display provides detailed information if the program aborts due to bugs}, www_section = {sensor networks}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.72.8036}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.8036.pdf}, }
@booklet{Fuhrmann_networkservices, title = {Network Services for the Support of Very-Low-Resource Devices}, author = {Thomas Fuhrmann and Till Harbaum and Martina Zitterbart}, year = {2003}, abstract = {Visions of future computing scenarios envisage a multitude of very-low-resource devices linked by power-efficient wireless communication means. This paper presents our vision of such a scenario. From this vision requirements are derived for an infrastructure that is able to satisfy the largely differing needs of these devices. The paper also shows how innovative, collaborating applications between distributed sensors and actuators can arise from such an infrastructure. The realization of such innovative applications is illustrated with two examples of straightforward services that have been implemented with the AMnet infrastructure that is currently being developed in the FlexiNet project. Additionally, first performance measurements for one of these services are given. Index terms {\textemdash} Bluetooth, Programmable networks, Sensoractuator networks}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.186}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASWN2003.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@booklet{Fuhrmann_usingbluetooth, title = {Using Bluetooth for Informationally Enhanced Environments}, author = {Thomas Fuhrmann and Till Harbaum}, year = {2003}, abstract = {The continued miniaturization in computing and wireless communication is about to make informationally enhanced environments become a reality. Already today, devices like a notebook computer or a personal digital assistent (PDA) can easily connect to the Internet via IEEE 802.11 networks (WaveLAN) or similar technologies provided at so-called hot-spots. In the near future, even smaller devices can join a wireless network to exchange status information or send and receive commands. In this paper, we present sample uses of a generic Bluetooth component that we have developed and that has been successfully integrated into various mininature devices to transmit sensor data or exchange control commands. The use of standard protocols like TCP/IP, Obex, and HTTP simplifies the use of those devices with conventional devices (notebook, PDA, cell-phone) without even requiring special drivers or applications for these devices. While such scenarios have already often been dreamt of, we are able to present a working solution based on small and cost-effective standard elements. We describe two applications that illustrate the power this approach in the broad area of e-commerce, e-learning, and e-government: the BlueWand, a small, pen-like device that can control Bluetooth devices in its vincinity by simple gestures, and a door plate that can display messages that are posted to it e.g. by a Bluetooth PDA. Keywords: Human-Computer Interaction, Ubiquitous Computing, Wireless Communications (Bluetooth)}, www_section = {Bluetooth, ubiquitous computing}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.73.2131}, }
@conference{GHPvR05, title = {Provable Anonymity}, author = {Flavio D. Garcia and Ichiro Hasuo and Wolter Pieters and Peter van Rossum}, booktitle = {Proceedings of the 3rd ACM Workshop on Formal Methods in Security Engineering (FMSE05)}, year = {2005}, month = {November}, address = {Alexandria, VA, USA}, abstract = {This paper provides a formal framework for the analysis of information hiding properties of anonymous communication protocols in terms of epistemic logic.The key ingredient is our notion of observational equivalence, which is based on the cryptographic structure of messages and relations between otherwise random looking messages. Two runs are considered observationally equivalent if a spy cannot discover any meaningful distinction between them.We illustrate our approach by proving sender anonymity and unlinkability for two anonymizing protocols, Onion Routing and Crowds. Moreover, we consider a version of Onion Routing in which we inject a subtle error and show how our framework is capable of capturing this flaw}, www_section = {cryptography, onion routing}, isbn = {1-59593-231-3}, doi = {10.1145/1103576.1103585}, url = {http://portal.acm.org/citation.cfm?id=1103576.1103585}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GHPvR05.pdf}, }
@conference{GKK03, title = {Rapid Mixing and Security of Chaum's Visual Electronic Voting}, author = {Marcin Gomulkiewicz and Marek Klonowski and Miroslaw Kutylowski}, booktitle = {Proceedings of ESORICS 2003}, organization = {Springer Berlin / Heidelberg}, year = {2003}, month = {October}, publisher = {Springer Berlin / Heidelberg}, abstract = {Recently, David Chaum proposed an electronic voting scheme that combines visual cryptography and digital processing. It was designed to meet not only mathematical security standards, but also to be accepted by voters that do not trust electronic devices. In this scheme mix-servers are used to guarantee anonymity of the votes in the counting process. The mix-servers are operated by different parties, so an evidence of their correct operation is necessary. For this purpose the protocol uses randomized partial checking of Jakobsson et al., where some randomly selected connections between the (encoded) inputs and outputs of a mix-server are revealed. This leaks some information about the ballots, even if intuitively this information cannot be used for any efficient attack. We provide a rigorous stochastic analysis of how much information is revealed by randomized partial checking in the Chaums protocol. We estimate how many mix-servers are necessary for a fair security level. Namely, we consider probability distribution of the permutations linking the encoded votes with the decoded votes given the information revealed by randomized partial checking. We show that the variation distance between this distribution and the uniform distribution is already for a constant number of mix-servers (n is the number of voters). This means that a constant number of trustees in the Chaums protocol is enough to obtain provable security. 
The analysis also shows that certain details of the Chaums protocol can be simplified without lowering security level}, www_section = {electronic voting, Markov chain, path coupling, randomized partial checking, rapid mixing}, isbn = {978-3-540-20300-1}, doi = {10.1007/b13237}, url = {http://www.springerlink.com/content/5gmj68nn4x1xc4j1/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GKK03.pdf}, }
@conference{Gairing:2005:SRI:1073970.1074000, title = {Selfish Routing with Incomplete Information}, author = {Gairing, Martin and Monien, Burkhard and Tiemann, Karsten}, booktitle = {SPAA'05. Proceedings of the 17th Annual ACM Symposium on Parallelism in Algorithms and Architectures}, organization = {ACM}, year = {2005}, month = {July}, address = {Las Vegas, Nevada}, pages = {203--212}, publisher = {ACM}, series = {SPAA '05}, abstract = {In his seminal work Harsanyi introduced an elegant approach to study non-cooperative games with incomplete information where the players are uncertain about some parameters. To model such games he introduced the Harsanyi transformation, which converts a game with incomplete information to a strategic game where players may have different types. In the resulting Bayesian game players' uncertainty about each others types is described by a probability distribution over all possible type profiles.In this work, we introduce a particular selfish routing game with incomplete information that we call Bayesian routing game. Here, n selfish users wish to assign their traffic to one of m links. Users do not know each others traffic. Following Harsanyi's approach, we introduce for each user a set of possible types.This paper presents a comprehensive collection of results for the Bayesian routing game.We prove, with help of a potential function, that every Bayesian routing game possesses a pure Bayesian Nash equilibrium. For the model of identical links and independent type distribution we give a polynomial time algorithm to compute a pure Bayesian Nash equilibrium.We study structural properties of fully mixed Bayesian Nash equilibria for the model of identical links and show that they maximize individual cost. In general there exists more than one fully mixed Bayesian Nash equilibrium. 
We characterize the class of fully mixed Bayesian Nash equilibria in the case of independent type distribution.We conclude with results on coordination ratio for the model of identical links for three social cost measures, that is, social cost as expected maximum congestion, sum of individual costs and maximum individual cost. For the latter two we are able to give (asymptotic) tight bounds using our results on fully mixed Bayesian Nash equilibria.To the best of our knowledge this is the first time that mixed Bayesian Nash equilibria have been studied in conjunction with social cost}, www_section = {bayesian game, coordination ratio, incomplete information, nash equilibria, selfish routing}, isbn = {1-58113-986-1}, doi = {http://doi.acm.org/10.1145/1073970.1074000}, url = {http://doi.acm.org/10.1145/1073970.1074000}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SPAA\%2705\%20-\%20Selfish\%20routing\%20with\%20incomplete\%20information.pdf}, }
@conference{Garbacki:2007:ATP:1270401.1271766, title = {An Amortized Tit-For-Tat Protocol for Exchanging Bandwidth instead of Content in P2P Networks}, author = {Garbacki, Pawel and Epema, Dick H. J. and van Steen, Maarten}, booktitle = {SASO 2007. Proceedings of the First International Conference on Self-Adaptive and Self-Organizing Systems}, organization = {IEEE Computer Society}, year = {2007}, month = {July}, address = {Boston, Massachusetts}, pages = {119--128}, publisher = {IEEE Computer Society}, series = {SASO '07}, abstract = {Incentives for resource sharing are crucial for the proper operation of P2P networks. The principle of the incentive mechanisms in current content sharing P2P networks such as BitTorrent is to have peers exchange content of mutual interest. As a consequence, a peer can actively participate in the system only if it shares content that is of immediate interest to other peers. In this paper we propose to lift this restriction by using bandwidth rather than content as the resource upon which incentives are based. Bandwidth, in contrast to content, is independent of peer interests and so can be exchanged between any two peers. We present the design of a protocol called amortized tit-for-tat (ATFT) based on the bandwidth-exchange concept. This protocol defines mechanisms for bandwidth exchange corresponding to those in BitTorrent for content exchange, in particular for finding bandwidth borrowers that amortize the bandwidth borrowed in the past with their currently idle bandwidth. In addition to the formally proven incentives for bandwidth contributions, ATFT provides natural solutions to the problems of peer bootstrapping, seeding incentive, peer link asymmetry, and anonymity, which have previously been addressed with much more complex designs. 
Experiments with a realworld dataset confirm that ATFT is efficient in enforcing bandwidth contributions and results in download performance better than provided by incentive mechanisms based on content exchange}, www_section = {bandwidth exchange, p2p network, resource sharing, tit-for-tat}, isbn = {0-7695-2906-2}, doi = {http://dx.doi.org/10.1109/SASO.2007.9}, url = {http://dx.doi.org/10.1109/SASO.2007.9}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SASO\%2707\%20-\%20Garbacki\%2C\%20Epema\%20\%26\%20van\%20Steen.pdf}, }
@conference{Garbacki:2007:ATP:1270401.1271766_0, title = {An Amortized Tit-For-Tat Protocol for Exchanging Bandwidth instead of Content in P2P Networks}, author = {Garbacki, Pawel and Epema, Dick H. J. and van Steen, Maarten}, booktitle = {SASO 2007. Proceedings of the First International Conference on Self-Adaptive and Self-Organizing Systems}, organization = {IEEE Computer Society}, year = {2007}, month = {July}, address = {Boston, Massachusetts}, pages = {119--128}, publisher = {IEEE Computer Society}, series = {SASO '07}, abstract = {Incentives for resource sharing are crucial for the proper operation of P2P networks. The principle of the incentive mechanisms in current content sharing P2P networks such as BitTorrent is to have peers exchange content of mutual interest. As a consequence, a peer can actively participate in the system only if it shares content that is of immediate interest to other peers. In this paper we propose to lift this restriction by using bandwidth rather than content as the resource upon which incentives are based. Bandwidth, in contrast to content, is independent of peer interests and so can be exchanged between any two peers. We present the design of a protocol called amortized tit-for-tat (ATFT) based on the bandwidth-exchange concept. This protocol defines mechanisms for bandwidth exchange corresponding to those in BitTorrent for content exchange, in particular for finding bandwidth borrowers that amortize the bandwidth borrowed in the past with their currently idle bandwidth. In addition to the formally proven incentives for bandwidth contributions, ATFT provides natural solutions to the problems of peer bootstrapping, seeding incentive, peer link asymmetry, and anonymity, which have previously been addressed with much more complex designs. 
Experiments with a real-world dataset confirm that ATFT is efficient in enforcing bandwidth contributions and results in download performance better than provided by incentive mechanisms based on content exchange}, www_section = {bandwidth exchange, p2p network, resource sharing, tit-for-tat}, isbn = {0-7695-2906-2}, doi = {http://dx.doi.org/10.1109/SASO.2007.9}, url = {http://dx.doi.org/10.1109/SASO.2007.9}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SASO\%2707\%20-\%20Garbacki\%2C\%20Epema\%20\%26\%20van\%20Steen.pdf}, }
@conference{Garces-Erice2004DataIndexing, title = {Data Indexing in Peer-to-Peer DHT Networks}, author = {Garc{\'e}s-Erice, L. and Felber, P. A. and Biersack, E. W. and Urvoy-Keller, G. and Ross, K. W.}, booktitle = {Proceedings of the 24th International Conference on Distributed Computing Systems (ICDCS'04)}, organization = {IEEE Computer Society}, year = {2004}, address = {Washington, DC, USA}, pages = {200--208}, publisher = {IEEE Computer Society}, series = {ICDCS '04}, isbn = {0-7695-2086-3}, url = {http://dl.acm.org/citation.cfm?id=977400.977979}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Garcia05off-linekarma:, title = {Off-line Karma: A Decentralized Currency for Peer-to-peer and Grid Applications}, author = {Flavio D. Garcia and Jaap-Henk Hoepman}, booktitle = {ACNS'05. 3rd Applied Cryptography and Network Security Conference}, organization = {Springer}, volume = {3531}, year = {2005}, month = {June}, address = {New York, NY, USA}, pages = {364--377}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {Peer-to-peer (P2P) and grid systems allow their users to exchange information and share resources, with little centralised or hierarchical control, instead relying on the fairness of the users to make roughly as much resources available as they use. To enforce this balance, some kind of currency or barter (called karma) is needed that must be exchanged for resources thus limiting abuse. We present a completely decentralised, off-line karma implementation for P2P and grid systems, that detects double-spending and other types of fraud under varying adversarial scenarios. The system is based on tracing the spending pattern of coins, and distributing the normally central role of a bank over a predetermined, but random, selection of nodes. The system is designed to allow nodes to join and leave the system at arbitrary times}, www_section = {decentralized, free-riding, GRID, micropayments, peer-to-peer networking, security}, doi = {10.1007/11496137_25}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACNS\%2705\%20-\%20Garcia\%20\%26\%20Hoepman\%20-\%20Off-line\%20Karma.pdf}, }
@conference{Gay03thenesc, title = {The nesC language: A holistic approach to networked embedded systems}, author = {David Gay and Matt Welsh and Philip Levis and Eric Brewer and von Behren, Robert and Culler, David}, booktitle = {Proceedings of Programming Language Design and Implementation (PLDI)}, year = {2003}, pages = {1--11}, abstract = {We present nesC, a programming language for networked embedded systems that represent a new design space for application developers. An example of a networked embedded system is a sensor network, which consists of (potentially) thousands of tiny, low-power "motes," each of which execute concurrent, reactive programs that must operate with severe memory and power constraints. nesC's contribution is to support the special needs of this domain by exposing a programming model that incorporates event-driven execution, a flexible concurrency model, and component-oriented application design. Restrictions on the programming model allow the nesC compiler to perform whole-program analyses, including data-race detection (which improves reliability) and aggressive function inlining (which reduces resource consumption). nesC has been used to implement TinyOS, a small operating system for sensor networks, as well as several significant sensor applications. nesC and TinyOS have been adopted by a large number of sensor network research groups, and our experience and evaluation of the language shows that it is effective at supporting the complex, concurrent programming style demanded by this new class of deeply networked systems}, www_section = {data races, nesC, TinyOS}, doi = {10.1145/781131.781133}, url = {http://portal.acm.org/citation.cfm?id=781133}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.127.9488.pdf}, }
@conference{Godfrey05heterogeneityand, title = {Heterogeneity and Load Balance in Distributed Hash Tables}, author = {Godfrey, Brighten and Ion Stoica}, booktitle = {Proceedings of IEEE INFOCOM}, year = {2005}, abstract = {Existing solutions to balance load in DHTs incur a high overhead either in terms of routing state or in terms of load movement generated by nodes arriving or departing the system. In this paper, we propose a set of general techniques and use them to develop a protocol based on Chord, called Y0, that achieves load balancing with minimal overhead under the typical assumption that the load is uniformly distributed in the identifier space. In particular, we prove that Y0 can achieve near-optimal load balancing, while moving little load to maintain the balance and increasing the size of the routing tables by at most a constant factor}, www_section = {Chord, distributed hash table, load balancing}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.6740}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.pdf}, }
@article{Godfrey:2006:MCD:1151659.1159931, title = {Minimizing churn in distributed systems}, author = {Godfrey, Brighten and S Shenker and Ion Stoica}, journal = {SIGCOMM Computer Communication Review}, volume = {36}, year = {2006}, month = {August}, address = {New York, NY, USA}, pages = {147--158}, publisher = {ACM}, abstract = {A pervasive requirement of distributed systems is to deal with churn-change in the set of participating nodes due to joins, graceful leaves, and failures. A high churn rate can increase costs or decrease service quality. This paper studies how to reduce churn by selecting which subset of a set of available nodes to use.First, we provide a comparison of the performance of a range of different node selection strategies in five real-world traces. Among our findings is that the simple strategy of picking a uniform-random replacement whenever a node fails performs surprisingly well. We explain its performance through analysis in a stochastic model.Second, we show that a class of strategies, which we call "Preference List" strategies, arise commonly as a result of optimizing for a metric other than churn, and produce high churn relative to more randomized strategies under realistic node failure patterns. Using this insight, we demonstrate and explain differences in performance for designs that incorporate varying degrees of randomization. We give examples from a variety of protocols, including anycast, over-lay multicast, and distributed hash tables. In many cases, simply adding some randomization can go a long way towards reducing churn}, www_section = {churn, distributed hash table, multicast, node selection}, issn = {0146-4833}, doi = {http://doi.acm.org/10.1145/1151659.1159931}, url = {http://doi.acm.org/10.1145/1151659.1159931}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comp.\%20Comm.\%20Rev.\%20-\%20Minimizing\%20churn\%20in\%20distributed\%20systems.pdf}, }
@conference{Goh04secureindexes, title = {Secure Indexes}, author = {Eu-jin Goh}, booktitle = {In submission}, year = {2004}, url = {http://gnunet.org/papers/secureindex.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/secureindex.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Goldberg:2008:RTA:1402958.1402989, title = {Rationality and Traffic Attraction: Incentives for Honest Path Announcements in BGP}, author = {Goldberg, Sharon and Halevi, Shai and Jaggard, Aaron D. and Ramachandran, Vijay and Wright, Rebecca N.}, booktitle = {SIGCOMM'08. Proceedings of the ACM SIGCOMM 2008 Conference on Data Communication}, organization = {ACM}, year = {2008}, month = {October}, address = {Seattle, WA}, pages = {267--278}, publisher = {ACM}, series = {SIGCOMM Computer Communication Review}, abstract = {We study situations in which autonomous systems (ASes) may have incentives to send BGP announcements differing from the AS-level paths that packets traverse in the data plane. Prior work on this issue assumed that ASes seek only to obtain the best possible outgoing path for their traffic. In reality, other factors can influence a rational AS's behavior. Here we consider a more natural model, in which an AS is also interested in attracting incoming traffic (e.g., because other ASes pay it to carry their traffic). We ask what combinations of BGP enhancements and restrictions on routing policies can ensure that ASes have no incentive to lie about their data-plane paths. We find that protocols like S-BGP alone are insufficient, but that S-BGP does suffice if coupled with additional (quite unrealistic) restrictions on routing policies. Our game-theoretic analysis illustrates the high cost of ensuring that the ASes honestly announce data-plane paths in their BGP path announcements}, www_section = {as, autonomus system, bgp, incentives}, isbn = {978-1-60558-175-0}, doi = {http://doi.acm.org/10.1145/1402958.1402989}, url = {http://doi.acm.org/10.1145/1402958.1402989}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2708\%20-\%20Rationality\%20and\%20traffic\%20attraction.pdf}, }
@book{Goldreich98securemulti-party, title = {Secure Multi-Party Computation}, author = {Oded Goldreich}, booktitle = {The Foundations of Cryptography}, organization = {Cambridge University Press}, volume = {2}, year = {1998}, publisher = {Cambridge University Press}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.11.2201\&rep=rep1\&type=pdf}, www_section = {Unsorted}, }
@article{Goldschlag99onionrouting, title = {Onion Routing for Anonymous and Private Internet Connections}, author = {David Goldschlag and Michael Reed and Paul Syverson}, journal = {Communications of the ACM}, volume = {42}, year = {1999}, pages = {39--41}, abstract = {As of this article's publication, the prototype network is processing more than 1 million Web connections per month from more than six thousand IP addresses in twenty countries and in all six main top level domains. [7] Onion Routing operates by dynamically building anonymous connections within a network of real-time Chaum Mixes [3]. A Mix is a store and forward device that accepts a number of fixed-length messages from numerous sources, performs cryptographic transformations on the messages, and then forwards the messages to the next destination in a random order. A single Mix makes tracking of a particular message either by specific bit-pattern, size, or ordering with respect to other messages difficult. By routing through numerous Mixes in the network, determining who is talking to whom becomes even more difficult. Onion Routing's network of core onion-routers (Mixes) is distributed, fault-tolerant, and under the control of multiple administrative domains, so no single onion-router can bring down the network or compromise a user's privacy, and cooperation between compromised onion-routers is thereby confounded}, url = {http://www.onion-router.net/Publications/CACM-1999}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/onionrouting.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
@conference{Golle01incentivesfor, title = {Incentives for Sharing in Peer-to-Peer Networks}, author = {Philippe Golle and Kevin Leyton-Brown and Ilya Mironov and Mark Lillibridge}, booktitle = {EC'01: Proceedings of the Second International Workshop on Electronic Commerce}, organization = {Springer-Verlag}, year = {2001}, address = {London, UK}, pages = {75--87}, publisher = {Springer-Verlag}, abstract = {We consider the free-rider problem in peer-to-peer file sharing networks such as Napster: that individual users are provided with no incentive for adding value to the network. We examine the design implications of the assumption that users will selfishly act to maximize their own rewards, by constructing a formal game theoretic model of the system and analyzing equilibria of user strategies under several novel payment mechanisms. We support and extend this work with results from experiments with a multi-agent reinforcement learning model}, www_section = {free-riding, incentives}, isbn = {3-540-42878-X}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.23.9004.pdf}, }
@conference{Golle:sp2006, title = {Deterring Voluntary Trace Disclosure in Re-encryption Mix Networks}, author = {Philippe Golle and XiaoFeng Wang and Jakobsson, Markus and Alex Tsow}, booktitle = {Proceedings of the 2006 IEEE Symposium on Security and Privacy}, organization = {IEEE CS}, year = {2006}, month = {May}, address = {Oakland, CA}, pages = {121--131}, publisher = {IEEE CS}, abstract = {Mix-networks, a family of anonymous messaging protocols, have been engineered to withstand a wide range of theoretical internal and external adversaries. An undetectable insider threat{\textemdash}voluntary partial trace disclosures by server administrators{\textemdash}remains a troubling source of vulnerability. An administrator's cooperation could be the result of coercion, bribery, or a simple change of interests. While eliminating this insider threat is impossible, it is feasible to deter such unauthorized disclosures by bundling them with additional penalties. We abstract these costs with collateral keys, which grant access to customizable resources. This article introduces the notion of trace-deterring mix-networks, which encode collateral keys for every server-node into every end-to-end message trace. The network reveals no keying material when the input-to-output transitions of individual servers remain secret. Two permutation strategies for encoding key information into traces, mix-and-flip and all-or-nothing, are presented. We analyze their trade-offs with respect to computational efficiency, anonymity sets, and colluding message senders. Our techniques have sufficiently low overhead for deployment in large-scale elections, thereby providing a sort of publicly verifiable privacy guarantee}, www_section = {anonymity measurement, privacy, re-encryption}, doi = {10.1145/1698750.1698758}, url = {http://portal.acm.org/citation.cfm?id=1698750.1698758}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Golle-sp2006.pdf}, }
@conference{GolleJakobssonJuelsSyverson:universal04, title = {Universal Re-Encryption for Mixnets}, author = {Philippe Golle and Jakobsson, Markus and Ari Juels and Paul Syverson}, booktitle = {Proceedings of the 2004 RSA Conference, Cryptographer's track}, organization = {Springer Berlin / Heidelberg}, year = {2004}, month = {February}, address = {San Francisco, USA}, publisher = {Springer Berlin / Heidelberg}, abstract = {We introduce a new cryptographic technique that we call universal re-encryption. A conventional cryptosystem that permits re-encryption, such as ElGamal, does so only for a player with knowledge of the public key corresponding to a given ciphertext. In contrast, universal re-encryption can be done without knowledge of public keys. We propose an asymmetric cryptosystem with universal re-encryption that is half as efficient as standard ElGamal in terms of computation and storage. While technically and conceptually simple, universal re-encryption leads to new types of functionality in mixnet architectures. Conventional mixnets are often called upon to enable players to communicate with one another through channels that are externally anonymous, i.e., that hide information permitting traffic-analysis. Universal re-encryption lets us construct a mixnet of this kind in which servers hold no public or private keying material, and may therefore dispense with the cumbersome requirements of key generation, key distribution, and private-key management. We describe two practical mixnet constructions, one involving asymmetric input ciphertexts, and another with hybrid-ciphertext inputs}, www_section = {anonymity, private channels, universal re-encryption}, isbn = {978-3-540-20996-6}, doi = {10.1007/b95630}, url = {http://www.springerlink.com/content/1fu5qrb1a2kfe7f9/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GolleJakobssonJuelsSyverson-universal04.pdf}, }
@conference{Goyal:2006:AEF:1180405.1180418, title = {Attribute-based encryption for fine-grained access control of encrypted data}, author = {Goyal, Vipul and Pandey, Omkant and Amit Sahai and Waters, Brent}, booktitle = {CCS'06--Proceedings of the 13th ACM Conference on Computer and Communications Security}, organization = {ACM}, year = {2006}, month = {October}, address = {Alexandria, VA, USA}, pages = {89--98}, publisher = {ACM}, series = {CCS '06}, abstract = {As more sensitive data is shared and stored by third-party sites on the Internet, there will be a need to encrypt data stored at these sites. One drawback of encrypting data is that it can be selectively shared only at a coarse-grained level (i.e., giving another party your private key). We develop a new cryptosystem for fine-grained sharing of encrypted data that we call Key-Policy Attribute-Based Encryption (KP-ABE). In our cryptosystem, ciphertexts are labeled with sets of attributes and private keys are associated with access structures that control which ciphertexts a user is able to decrypt. We demonstrate the applicability of our construction to sharing of audit-log information and broadcast encryption. Our construction supports delegation of private keys which subsumes Hierarchical Identity-Based Encryption (HIBE)}, www_section = {access control, attribute-based encryption, audit logs, broadcast encryption, delegation, hierarchical identity-based encryption}, isbn = {1-59593-518-5}, doi = {http://doi.acm.org/10.1145/1180405.1180418}, url = {http://doi.acm.org/10.1145/1180405.1180418}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2706\%20-\%20Attributed-based\%20encryption\%20for\%20fine-grained\%20access\%20control\%20of\%20encrypted\%20data.pdf}, }
@conference{Grolimund06havelaar:a, title = {Havelaar: A Robust and Efficient Reputation System for Active Peer-to-Peer Systems}, author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}, booktitle = {NetEcon'06. 1st Workshop on the Economics of Networked Systems}, year = {2006}, month = {June}, address = {Ann Arbor, Michigan}, abstract = {Peer-to-peer (p2p) systems have the potential to harness huge amounts of resources. Unfortunately, however, it has been shown that most of today's p2p networks suffer from a large fraction of free-riders, which mostly consume resources without contributing much to the system themselves. This results in an overall performance degradation. One particularly interesting resource is bandwidth. Thereby, a service differentiation approach seems appropriate, where peers contributing higher upload bandwidth are rewarded with higher download bandwidth in return. Keeping track of the contribution of each peer in an open, decentralized environment, however, is not trivial; many systems which have been proposed are susceptible to false reports. Besides being prone to attacks, some solutions have a large communication and computation overhead, which can even be linear in the number of transactions{\textemdash}an unacceptable burden in practical and active systems. In this paper, we propose a reputation system which overcomes this scaling problem. Our analytical and simulation results are promising, indicating that the mechanism is accurate and efficient, especially when applied to systems where there are lots of transactions (e.g., due to erasure coding)}, www_section = {free-riding, havelaar, P2P, peer-to-peer networking, performance degradation, reputation system}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2706\%20-\%20Harvelaar.pdf}, }
@conference{Grolimund:2006:CFT:1173705.1174355, title = {Cryptree: A Folder Tree Structure for Cryptographic File Systems}, author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}, booktitle = {SRDS'06--Proceedings of the 25th IEEE Symposium on Reliable Distributed Systems}, organization = {IEEE Computer Society}, year = {2006}, month = {October}, address = {Leeds, UK}, pages = {189--198}, publisher = {IEEE Computer Society}, abstract = {We present Cryptree, a cryptographic tree structure which facilitates access control in file systems operating on untrusted storage. Cryptree leverages the file system's folder hierarchy to achieve efficient and intuitive, yet simple, access control. The highlights are its ability to recursively grant access to a folder and all its subfolders in constant time, the dynamic inheritance of access rights which inherently prevents scattering of access rights, and the possibility to grant someone access to a file or folder without revealing the identities of other accessors. To reason about and to visualize Cryptree, we introduce the notion of cryptographic links. We describe the Cryptrees we have used to enforce read and write access in our own file system. Finally, we measure the performance of the Cryptree and compare it to other approaches}, www_section = {cryptographic tree structure, cryptree, hierarchy, untrusted storage}, isbn = {0-7695-2677-2}, doi = {http://dx.doi.org/10.1109/SRDS.2006.15}, url = {http://dl.acm.org/citation.cfm?id=1173705.1174355}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRDS\%2706\%20-\%20Cryptree.pdf}, }
@conference{Gulcu96mixingemail, title = {Mixing email with {Babel}}, author = {Ceki G{\"u}lc{\"u} and Gene Tsudik}, booktitle = {Symposium on Network and Distributed System Security}, year = {1996}, pages = {2--16}, abstract = {Increasingly large numbers of people communicate today via electronic means such as email or news forums. One of the basic properties of the current electronic communication means is the identification of the end-points. However, at times it is desirable or even critical to hide the identity and/or whereabouts of the end-points (e.g., human users) involved. This paper discusses the goals and desired properties of anonymous email in general and introduces the design and salient features of Babel anonymous remailer. Babel allows email users to converse electronically while remaining anonymous with respect to each other and to other-- even hostile--parties. A range of attacks and corresponding countermeasures is considered. An attempt is made to formalize and quantify certain dimensions of anonymity and untraceable communication}, url = {http://eprints.kfupm.edu.sa/50994/1/50994.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/babel.pdf}, www_section = {Unsorted}, }
@conference{Gummadi:2003:IDR:863955.863998, title = {The impact of DHT routing geometry on resilience and proximity}, author = {Krishna Phani Gummadi and Gummadi, Ramakrishna and Steven D. Gribble and Sylvia Paul Ratnasamy and S Shenker and Ion Stoica}, booktitle = {SIGCOMM '03--Proceedings of the 2003 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications}, organization = {ACM}, year = {2003}, month = {August}, address = {Karlsruhe, Germany}, pages = {381--394}, publisher = {ACM}, series = {SIGCOMM '03}, abstract = {The various proposed DHT routing algorithms embody several different underlying routing geometries. These geometries include hypercubes, rings, tree-like structures, and butterfly networks. In this paper we focus on how these basic geometric approaches affect the resilience and proximity properties of DHTs. One factor that distinguishes these geometries is the degree of flexibility they provide in the selection of neighbors and routes. Flexibility is an important factor in achieving good static resilience and effective proximity neighbor and route selection. Our basic finding is that, despite our initial preference for more complex geometries, the ring geometry allows the greatest flexibility, and hence achieves the best resilience and proximity performance}, www_section = {distributed hash table, flexibility, routing geometry}, isbn = {1-58113-735-4}, doi = {http://doi.acm.org/10.1145/863955.863998}, url = {http://doi.acm.org/10.1145/863955.863998}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2703\%20-\%20The\%20impact\%20of\%20DHT\%20routing\%20geometry\%20on\%20resilience\%20and\%20proximity.pdf}, }
@conference{Guo:2005:MAM:1251086.1251090, title = {Measurements, analysis, and modeling of BitTorrent-like systems}, author = {Guo, Lei and Chen, Songqing and Xiao, Zhen and Tan, Enhua and Ding, Xiaoning and Zhang, Xiaodong}, booktitle = {IMC'05. Proceedings of the 5th ACM SIGCOMM Conference on Internet Measurement}, organization = {USENIX Association}, year = {2005}, month = {October}, address = {Berkeley, CA, USA}, pages = {4--4}, publisher = {USENIX Association}, series = {IMC '05}, abstract = {Existing studies on BitTorrent systems are single-torrent based, while more than 85\% of all peers participate in multiple torrents according to our trace analysis. In addition, these studies are not sufficiently insightful and accurate even for single-torrent models, due to some unrealistic assumptions. Our analysis of representative Bit-Torrent traffic provides several new findings regarding the limitations of BitTorrent systems: (1) Due to the exponentially decreasing peer arrival rate in reality, service availability in such systems becomes poor quickly, after which it is difficult for the file to be located and downloaded. (2) Client performance in the BitTorrent-like systems is unstable, and fluctuates widely with the peer population. (3) Existing systems could provide unfair services to peers, where peers with high downloading speed tend to download more and upload less. In this paper, we study these limitations on torrent evolution in realistic environments. Motivated by the analysis and modeling results, we further build a graph based multi-torrent model to study inter-torrent collaboration. Our model quantitatively provides strong motivation for inter-torrent collaboration instead of directly stimulating seeds to stay longer. 
We also discuss a system design to show the feasibility of multi-torrent collaboration}, www_section = {bittorrent system, intertorrent collaboration, multi-torrent collaboration, multiple torrents}, url = {http://www.usenix.org/events/imc05/tech/full_papers/guo/guo_html/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2705\%20-\%20Measurement\%2C\%20analysis\%20and\%20modeling\%20of\%20BitTorrent-like\%20systems.pdf}, }
@conference{Gupta03kelips:building, title = {Kelips: Building an efficient and stable P2P DHT through increased memory and background overhead}, author = {Indranil Gupta and Kenneth P. Birman and Prakash Linga and Alan Demers and Robbert Van Renesse}, booktitle = {Proceedings of the 2nd International Workshop on Peer-to-Peer Systems (IPTPS '03)}, year = {2003}, abstract = {A peer-to-peer (p2p) distributed hash table (DHT) system allows hosts to join and fail silently (or leave), as well as to insert and retrieve files (objects). This paper explores a new point in design space in which increased memory usage and constant background communication overheads are tolerated to reduce file lookup times and increase stability to failures and churn. Our system, called Kelips, uses peer-to-peer gossip to partially replicate file index information. In Kelips, (a) under normal conditions, file lookups are resolved with O(1) time and complexity (i.e., independent of system size), and (b) membership changes (e.g., even when a large number of nodes fail) are detected and disseminated to the system quickly. Per-node memory requirements are small in medium-sized systems. When there are failures, lookup success is ensured through query rerouting. Kelips achieves load balancing comparable to existing systems. Locality is supported by using topologically aware gossip mechanisms. Initial results of an ongoing experimental study are also discussed}, www_section = {distributed hash table, P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.3464}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.3464.pdf}, }
@conference{Gupta:2004:RMF:1018440.1021942, title = {Reputation Management Framework and Its Use as Currency in Large-Scale Peer-to-Peer Networks}, author = {Gupta, Rohit and Somani, Arun K.}, booktitle = {P2P'04. Proceedings of the 4th International Conference on Peer-to-Peer Computing}, organization = {IEEE Computer Society}, year = {2004}, month = {August}, address = {Zurich, Switzerland}, pages = {124--132}, publisher = {IEEE Computer Society}, series = {P2P '04}, abstract = {In this paper we propose a reputation management framework for large-scale peer-to-peer (P2P) networks, wherein all nodes are assumed to behave selfishly. The proposed framework has several advantages. It enables a form of virtual currency, such that the reputation of nodes is a measure of their wealth. The framework is scalable and provides protection against attacks by malicious nodes. The above features are achieved by developing trusted communities of nodes whose members trust each other and cooperate to deal with the problem of nodes' selfishness and possible maliciousness}, www_section = {framework, P2P, peer-to-peer networking, reputation management}, isbn = {0-7695-2156-8}, doi = {http://dx.doi.org/10.1109/P2P.2004.44}, url = {http://dx.doi.org/10.1109/P2P.2004.44}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2704\%20-\%20Reputation\%20management\%20framework.pdf}, }
% Halevy & Shamir (CRYPTO '02): Layered Subset Difference broadcast encryption.
% NOTE(review): reconstructed extraction-garbled math in abstract — superscripts were lost
% ("O(log2(n))" was O(log^2(n)); "{\textquestiondown}" was an epsilon in O(log^{1+eps}(n)));
% added missing comma in "Naor, Naor and Lotspiech".
@conference{Halevy:2002:LBE:646767.704291, title = {The LSD Broadcast Encryption Scheme}, author = {Halevy, Dani and Shamir, Adi}, booktitle = {CRYPTO'02--Proceedings of the 22nd Annual International Cryptology Conference on Advances in Cryptology}, organization = {Springer-Verlag}, year = {2002}, month = {August}, address = {Santa Barbara, CA, USA}, pages = {47--60}, publisher = {Springer-Verlag}, series = {Lecture Notes in Computer Science}, abstract = {Broadcast Encryption schemes enable a center to broadcast encrypted programs so that only designated subsets of users can decrypt each program. The stateless variant of this problem provides each user with a fixed set of keys which is never updated. The best scheme published so far for this problem is the "subset difference" (SD) technique of Naor, Naor and Lotspiech, in which each one of the n users is initially given O(log^2(n)) symmetric encryption keys. This allows the broadcaster to define at a later stage any subset of up to r users as "revoked", and to make the program accessible only to their complement by sending O(r) short messages before the encrypted program, and asking each user to perform an O(log(n)) computation. In this paper we describe the "Layered Subset Difference" (LSD) technique, which achieves the same goal with O(log^{1+epsilon}(n)) keys, O(r) messages, and O(log(n)) computation. This reduces the number of keys given to each user by almost a square root factor without affecting the other parameters. In addition, we show how to use the same LSD keys in order to address any subset defined by a nested combination of inclusion and exclusion conditions with a number of messages which is proportional to the complexity of the description rather than to the size of the subset. 
The LSD scheme is truly practical, and makes it possible to broadcast an unlimited number of programs to 256,000,000 possible customers by giving each new customer a smart card with one kilobyte of tamper-resistant memory. It is then possible to address any subset defined by t nested inclusion and exclusion conditions by sending less than 4t short messages, and the scheme remains secure even if all the other users form an adversarial coalition}, www_section = {broadcast encryption scheme, encryption, LSD}, isbn = {3-540-44050-X}, url = {http://dl.acm.org/citation.cfm?id=646767.704291}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2702\%20-\%20The\%20LSD\%20broadcast\%20encryption\%20scheme.pdf}, }
% Hall et al. (2001): polynomial-time algorithms for near-optimal data-migration
% plans under device space constraints; 3/2-approximation for direct migration.
@booklet{Hall01onalgorithms, title = {On Algorithms for Efficient Data Migration}, author = {Joseph Hall and Jason D. Hartline and Anna R. Karlin and Jared Saia and John Wilkes}, year = {2001}, abstract = {The data migration problem is the problem of computing an efficient plan for moving data stored on devices in a network from one configuration to another. Load balancing or changing usage patterns could necessitate such a rearrangement of data. In this paper, we consider the case where the objects are fixed-size and the network is complete. The direct migration problem is closely related to edge-coloring. However, because there are space constraints on the devices, the problem is more complex. Our main results are polynomial time algorithms for finding a near-optimal migration plan in the presence of space constraints when a certain number of additional nodes is available as temporary storage, and a 3/2-approximation for the case where data must be migrated directly to its destination}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.26.1365\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.26.1365.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% RWAP (ICCNMC '05): random-walk-based anonymous P2P protocol; trace-driven
% simulations show reduced traffic cost and encryption overhead.
@conference{HanLLHP05, title = {A Random Walk Based Anonymous Peer-to-Peer Protocol Design}, author = {Jinsong Han and Yunhao Liu and Li Lu and Lei Hu and Abhishek Patil}, booktitle = {Proceedings of ICCNMC}, organization = {Springer Berlin / Heidelberg}, year = {2005}, pages = {143--152}, publisher = {Springer Berlin / Heidelberg}, abstract = {Anonymity has been one of the most challenging issues in Ad Hoc environment such as P2P systems. In this paper, we propose an anonymous protocol called Random Walk based Anonymous Protocol (RWAP), in decentralized P2P systems. We evaluate RWAP by comprehensive trace driven simulations. Results show that RWAP significantly reduces traffic cost and encryption overhead compared with existing approaches}, www_section = {anonymity, P2P, RWAP}, isbn = {978-3-540-28102-3}, doi = {10.1007/11534310}, url = {http://www.springerlink.com/content/0642hvq80b27vv1f/}, }
% Harren et al. (IPTPS '01): research agenda for building complex query
% facilities on top of exact-match DHT-based P2P systems.
@conference{Harren:2002:CQD:646334.687945, title = {Complex Queries in DHT-based Peer-to-Peer Networks}, author = {Harren, Matthew and Hellerstein, Joseph M. and Huebsch, Ryan and Boon Thau Loo and S Shenker and Ion Stoica}, booktitle = {IPTPS'01--Revised Papers from the First International Workshop on Peer-to-Peer Systems}, organization = {Springer-Verlag}, year = {2002}, month = {March}, address = {Cambridge, MA, USA}, pages = {242--259}, publisher = {Springer-Verlag}, series = {IPTPS '01}, abstract = {Recently a new generation of P2P systems, offering distributed hash table (DHT) functionality, have been proposed. These systems greatly improve the scalability and exact-match accuracy of P2P systems, but offer only the exact-match query facility. This paper outlines a research agenda for building complex query facilities on top of these DHT-based P2P systems. We describe the issues involved and outline our research plan and current status}, www_section = {distributed hash table}, isbn = {3-540-44179-4}, url = {http://dl.acm.org/citation.cfm?id=646334.687945}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Complex\%20queries\%20in\%20DHT-based\%20p2p\%20networks.pdf}, }
% Hartline & Roughgarden (STOC '08): optimal mechanism design with money burning.
% NOTE(review): doi now holds the bare DOI (resolver link remains in url).
@conference{Hartline:2008:OMD:1374376.1374390, title = {Optimal mechanism design and money burning}, author = {Jason D. Hartline and Roughgarden, Tim}, booktitle = {STOC'08. Proceedings of the 40th annual ACM Symposium on Theory of Computing}, organization = {ACM}, year = {2008}, month = {May}, address = {Victoria, British Columbia, Canada}, pages = {75--84}, publisher = {ACM}, series = {STOC '08}, abstract = {Mechanism design is now a standard tool in computer science for aligning the incentives of self-interested agents with the objectives of a system designer. There is, however, a fundamental disconnect between the traditional application domains of mechanism design (such as auctions) and those arising in computer science (such as networks): while monetary "transfers" (i.e., payments) are essential for most of the known positive results in mechanism design, they are undesirable or even technologically infeasible in many computer systems. Classical impossibility results imply that the reach of mechanisms without transfers is severely limited. Computer systems typically do have the ability to reduce service quality--routing systems can drop or delay traffic, scheduling protocols can delay the release of jobs, and computational payment schemes can require computational payments from users (e.g., in spam-fighting systems). Service degradation is tantamount to requiring that users "burn money", and such "payments" can be used to influence the preferences of the agents at a cost of degrading the social surplus. We develop a framework for the design and analysis of "money-burning mechanisms" to maximize the residual surplus-the total value of the chosen outcome minus the payments required. Our primary contributions are the following. * We define a general template for prior-free optimal mechanism design that explicitly connects Bayesian optimal mechanism design, the dominant paradigm in economics, with worst-case analysis. 
In particular, we establish a general and principled way to identify appropriate performance benchmarks in prior-free mechanism design. * For general single-parameter agent settings, we characterize the Bayesian optimal money-burning mechanism. * For multi-unit auctions, we design a near-optimal prior-free money-burning mechanism: for every valuation profile, its expected residual surplus is within a constant factor of our benchmark, the residual surplus of the best Bayesian optimal mechanism for this profile. * For multi-unit auctions, we quantify the benefit of general transfers over money-burning: optimal money-burning mechanisms always obtain a logarithmic fraction of the full social surplus, and this bound is tight}, www_section = {mechanism design, money burning, optimal mechanism design}, isbn = {978-1-60558-047-0}, doi = {10.1145/1374376.1374390}, url = {http://doi.acm.org/10.1145/1374376.1374390}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2708\%20-\%20Optimal\%20mechanism\%20design\%20and\%20money\%20burning.pdf}, }
% SkipNet (USITS '03): scalable overlay network with practical locality
% properties; no abstract on file, keywords indicate DHT / range queries.
@conference{Harvey:2003:SSO:1251460.1251469, title = {SkipNet: a scalable overlay network with practical locality properties}, author = {Harvey, Nicholas J. A. and Michael B. Jones and Stefan Saroiu and Marvin Theimer and Wolman, Alec}, booktitle = {Proceedings of the 4th conference on USENIX Symposium on Internet Technologies and Systems--Volume 4}, organization = {USENIX Association}, year = {2003}, address = {Berkeley, CA, USA}, pages = {9--9}, publisher = {USENIX Association}, series = {USITS'03}, www_section = {distributed hash table, range queries, SkipNet}, url = {http://dl.acm.org/citation.cfm?id=1251460.1251469}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/harvey.pdf}, }
% Heimbigner (SAC): Query/Advertise — Gnutella-like functionality over the Siena
% publish/subscribe middleware.
% NOTE(review): dropped "In " prefix from booktitle (standard styles prepend "In",
% which would double-print); fixed "Sienab" -> "Siena" (the pub/sub system named).
@conference{Heimbigner00adaptingpublish/subscribe, title = {Adapting Publish/Subscribe Middleware to Achieve Gnutella-like Functionality}, author = {Dennis Heimbigner}, booktitle = {Proc. of SAC}, year = {2000}, pages = {176--181}, abstract = {Gnutella represents a new wave of peer-to-peer applications providing distributed discovery and sharing of resources across the Internet. Gnutella is distinguished by its support for anonymity and by its decentralized architecture. The current Gnutella architecture and protocol have numerous flaws with respect to efficiency, anonymity, and vulnerability to malicious actions. An alternative design is described that provides Gnutella-like functionality but removes or mitigates many of Gnutella's flaws. This design, referred to as Query/Advertise (Q/A) is based upon a scalable publish/subscribe middleware system called Siena. A prototype implementation of Q/A is described. The relative benefits of this approach are discussed, and a number of open research problems are identified with respect to Q/A systems}, url = {http://serl.cs.colorado.edu/~serl/papers/CU-CS-909-00.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CU-CS-909-00.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Helmy (2004): book chapter on resource discovery in wireless ad hoc networks.
% NOTE(review): this is a chapter in an edited book, not conference proceedings —
% retyped @conference -> @incollection and dropped the "Chapter in:" junk prefix
% from booktitle; all other fields preserved.
@incollection{Helmy04efficientresource, title = {Efficient Resource Discovery in Wireless AdHoc Networks: Contacts Do Help}, author = {Ahmed Helmy}, booktitle = {Resource Management in Wireless Networking}, organization = {Kluwer Academic Publishers}, year = {2004}, publisher = {Kluwer Academic Publishers}, abstract = {The resource discovery problem poses new challenges in infrastructure-less wireless networks. Due to the highly dynamic nature of these networks and their bandwidth and energy constraints, there is a pressing need for energy-aware communicationefficient resource discovery protocols. This chapter provides an overview of several approaches to resource discovery, discussing their suitability for classes of wireless networks. The approaches discussed in this chapter include flooding-based approaches, hierarchical cluster-based and dominating set schemes, and hybrid loose hierarchy architectures. Furthermore, the chapter provides a detailed case study on the design, evaluation and analysis of an energy-efficient resource discovery protocol based on hybrid loose hierarchy and utilizing the concept of {\textquoteleft}contacts'}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.9310}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.76.9310.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Vesta (2001): software configuration management with repeatable, incremental,
% and consistent builds via immutable sources and cached build results.
@booklet{Heydon01thevesta, title = {The Vesta Approach to Software Configuration Management}, author = {Allan Heydon and Roy Levin and Timothy Mann and Yuan Yu}, year = {2001}, abstract = {Vesta is a system for software configuration management. It stores collections of source files, keeps track of which versions of which files go together, and automates the process of building a complete software artifact from its component pieces. Vesta's novel approach gives it three important properties. First, every build is repeatable, because its component sources and build tools are stored immutably and immortally, and its configuration description completely specifies what components and tools are used and how they are put together. Second, every build is incremental, because results of previous builds are cached and reused. Third, every build is consistent, because all build dependencies are automatically captured and recorded, so that a cached result from a previous build is reused only when doing so is certain to be correct. In addition, Vesta's flexible language for writing configuration descriptions makes it easy to describe large software configurations in a modular fashion and to create variant configurations by customizing build parameters. This paper gives a brief overview of Vesta, outlining Vesta's advantages over traditional tools, how those benefits are achieved, and the system's overall performance}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.23.7370}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRC-RR-168.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Hildrum & Kubiatowicz (DISC '03): making Pastry/Tapestry tolerant to failures
% and limited attacks with constant-factor storage/communication overhead.
% NOTE(review): dropped "In " prefix from booktitle (standard styles prepend "In",
% which would double-print).
@conference{Hildrum03asymptoticallyefficient, title = {Asymptotically Efficient Approaches to Fault-Tolerance in Peer-to-Peer}, author = {Hildrum, Kirsten and John Kubiatowicz}, booktitle = {Proc. of DISC}, year = {2003}, pages = {321--336}, abstract = {In this paper, we show that two peer-to-peer systems, Pastry [13] and Tapestry [17] can be made tolerant to certain classes of failures and a limited class of attacks. These systems are said to operate properly if they can find the closest node matching a requested ID. The system must also be able to dynamically construct the necessary routing information when new nodes enter or the network changes. We show that with an additional factor of storage overhead and communication overhead, they can continue to achieve both of these goals in the presence of a constant fraction nodes that do not obey the protocol. Our techniques are similar in spirit to those of Saia et al. [14] and Naor and Wieder [10]. Some simple simulations show that these techniques are useful even with constant overhead}, www_section = {fault-tolerance, P2P}, isbn = {978-3-540-20184-7}, doi = {10.1007/b13831}, url = {http://www.springerlink.com/content/7emt7u01cvbb6bu6/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.334.pdf}, }
% Hildrum et al. (UCB/CSD-02-1178, 2002): distributed nearest-neighbor algorithm
% for location-independent routing, in the context of Tapestry.
% NOTE(review): numbered departmental report — retyped @booklet -> @techreport and
% moved the issuing department from publisher to the standard institution field.
@techreport{Hildrum:CSD-02-1178, title = {Distributed Data Location in a Dynamic Network}, author = {Hildrum, Kirsten and John Kubiatowicz and Rao, Satish and Ben Y. Zhao}, number = {UCB/CSD-02-1178}, year = {2002}, month = {April}, institution = {EECS Department, University of California, Berkeley}, abstract = {Modern networking applications replicate data and services widely, leading to a need for location-independent routing -- the ability to route queries directly to objects using names that are independent of the objects' physical locations. Two important properties of a routing infrastructure are routing locality and rapid adaptation to arriving and departing nodes. We show how these two properties can be achieved with an efficient solution to the nearest-neighbor problem. We present a new distributed algorithm that can solve the nearest-neighbor problem for a restricted metric space. We describe our solution in the context of Tapestry, an overlay network infrastructure that employs techniques proposed by Plaxton, Rajaraman, and Richa}, url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2002/5214.html}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-02-1178.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Hof et al. (EWSN '04): secure distributed service directory for sensor networks
% built on a Content Addressable Network with self-certifying path names.
@conference{Hof04SecureDistributedServiceDirectory, title = {Design of a Secure Distributed Service Directory for Wireless Sensornetworks}, author = {Hans-Joachim Hof and Erik-Oliver Blass and Thomas Fuhrmann and Martina Zitterbart}, booktitle = {Proceedings of the First European Workshop on Wireless Sensor Networks}, year = {2004}, address = {Berlin, Germany}, type = {publication}, abstract = {Sensor networks consist of a potentially huge number of very small and resource limited self-organizing devices. This paper presents the design of a general distributed service directory architecture for sensor networks which especially focuses on the security issues in sensor networks. It ensures secure construction and maintenance of the underlying storage structure, a Content Addressable Network. It also considers integrity of the distributed service directory and secures communication between service provider and inquirer using self-certifying path names. Key area of application of this architecture are gradually extendable sensor networks where sensors and actuators jointly perform various user defined tasks, e.g., in the field of an office environment}, www_section = {sensor networks}, isbn = {978-3-540-20825-9}, doi = {10.1007/978-3-540-24606-0_19}, url = {http://i30www.ira.uka.de/research/publications/p2p/}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scan.pdf}, }
% Murdoch (CCS '06): linking Tor hidden services to their hosts via
% temperature-induced clock skew observable in timestamps.
@conference{HotOrNot, title = {Hot or Not: Revealing Hidden Services by their Clock Skew}, author = {Steven J. Murdoch}, booktitle = {Proceedings of CCS 2006}, organization = {ACM New York, NY, USA}, year = {2006}, month = {October}, publisher = {ACM New York, NY, USA}, abstract = {Location-hidden services, as offered by anonymity systems such as Tor, allow servers to be operated under a pseudonym. As Tor is an overlay network, servers hosting hidden services are accessible both directly and over the anonymous channel. Traffic patterns through one channel have observable effects on the other, thus allowing a service's pseudonymous identity and IP address to be linked. One proposed solution to this vulnerability is for Tor nodes to provide fixed quality of service to each connection, regardless of other traffic, thus reducing capacity but resisting such interference attacks. However, even if each connection does not influence the others, total throughput would still affect the load on the CPU, and thus its heat output. Unfortunately for anonymity, the result of temperature on clock skew can be remotely detected through observing timestamps. This attack works because existing abstract models of anonymity-network nodes do not take into account the inevitable imperfections of the hardware they run on. Furthermore, we suggest the same technique could be exploited as a classical covert channel and can even provide geolocation}, www_section = {anonymity, clock skew, covert channels, fingerprinting, Tor}, isbn = {1-59593-518-5}, doi = {10.1145/1180405.1180410}, url = {http://portal.acm.org/citation.cfm?id=1180410}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotOrNot.pdf}, }
% Hubaux, Buttyan & Capkun (2001): security in mobile ad hoc networks
% (no abstract on file; title-only record with citeseer links).
@booklet{Hubaux01thequest, title = {The Quest for Security in Mobile Ad Hoc Networks}, author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun}, year = {2001}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.130.6088\&rep=rep1\&type=pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.130.6088.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% PIER (VLDB '03): DHT-based distributed query engine for Internet-scale querying
% (no abstract on file; keywords indicate DHT / range queries).
@conference{Huebsch:2003:QIP:1315451.1315480, title = {Querying the internet with PIER}, author = {Huebsch, Ryan and Hellerstein, Joseph M. and Lanham, Nick and Boon Thau Loo and S Shenker and Ion Stoica}, booktitle = {Proceedings of the 29th international conference on Very large data bases--Volume 29}, organization = {VLDB Endowment}, year = {2003}, pages = {321--332}, publisher = {VLDB Endowment}, series = {VLDB '03}, www_section = {distributed hash table, PIER, range queries}, isbn = {0-12-722442-4}, url = {http://dl.acm.org/citation.cfm?id=1315451.1315480}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/vldb03-pier.pdf}, }
% Hurler et al. (2003): integrating embedded-sensor context into CSCW/CSCL
% frameworks to improve group awareness in e-learning environments.
@booklet{Hurler_automaticcontext, title = {Automatic Context Integration for Group Aware Environments}, author = {Bernhard Hurler and Leo Petrak and Thomas Fuhrmann and Oliver Brand and Martina Zitterbart}, year = {2003}, abstract = {Tele-collaboration is a valuable tool that can connect learners at different sites and help them benefit from their respective competences. Albeit many e-learning applications provide a high level of technical sophistication, such tools typically fall short of reflecting the learners ' full context, e.g., their presence and awareness. Hence, these applications cause many disturbances in the social interaction of the learners. This paper describes mechanisms to improve the group awareness in elearning environments with the help of automatic integration of such context information from the physical world. This information is gathered by different embedded sensors in various objects, e.g., a coffee mug or an office chair. This paper also describes first results of the integration of these sensors into an existing CSCW/CSCL framework}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1450}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hurler03context.pdf}, %%%%% ERROR: Missing field % www_section = {?????}, }
% Rodrigues & Liskov (IPTPS '05): erasure coding vs. replication for DHT availability.
% NOTE(review): fixed preposition in booktitle ("Workshop in" -> "Workshop on",
% matching the workshop's name as used elsewhere in this file, e.g. the IPTPS '03 entry).
@conference{IPTPS05, title = {High Availability in DHTs: Erasure Coding vs. Replication}, author = {Rodrigues, Rodrigo and Barbara Liskov}, booktitle = {IPTPS'05--Proceedings of the 4th International Workshop on Peer-to-Peer Systems}, organization = {Springer}, volume = {3640}, year = {2005}, month = {February}, address = {Ithaca, New York}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {High availability in peer-to-peer DHTs requires data redundancy. This paper compares two popular redundancy schemes: replication and erasure coding. Unlike previous comparisons, we take the characteristics of the nodes that comprise the overlay into account, and conclude that in some cases the benefits from coding are limited, and may not be worth its disadvantages}, www_section = {distributed hash table, erasure coding, high availability, peer-to-peer networking, redundancy, Replication}, doi = {10.1007/11558989_21}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2705\%20-\%20High\%20availability\%20in\%20DHTs\%3A\%20erasure\%20coding\%20vs.\%20replication.pdf}, }
% Pfitzmann, Pfitzmann & Waidner (1991): ISDN-MIXes — untraceable telephony with
% minimal bandwidth overhead.
% NOTE(review): brace-protected and corrected the system name in the title to
% "ISDN-MIXes", the casing used in the abstract itself, so styles cannot downcase it.
@conference{ISDN-mixes, title = {{ISDN-MIXes}: Untraceable communication with very small bandwidth overhead}, author = {Andreas Pfitzmann and Birgit Pfitzmann and Michael Waidner}, booktitle = {Proceedings of the GI/ITG Conference on Communication in Distributed Systems}, organization = {Springer-Verlag London, UK}, year = {1991}, month = {February}, pages = {451--463}, publisher = {Springer-Verlag London, UK}, abstract = {Untraceable communication for services like telephony is often considered infeasible in the near future because of bandwidth limitations. We present a technique, called ISDN-MIXes, which shows that this is not the case. As little changes as possible are made to the narrowband-ISDN planned by the PTTs. In particular, we assume the same subscriber lines with the same bit rate, and the same long-distance network between local exchanges, and we offer the same services. ISDN-MIXes are a combination of a new variant of CHAUM's MIXes, dummy traffic on the subscriber lines (where this needs no additional bandwidth), and broadcast of incoming-call messages in the subscriber-area}, isbn = {3-540-53721-X}, url = {http://portal.acm.org/citation.cfm?id=645662.664536}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.43.4892.pdf}, www_section = {Unsorted}, }
% Baird et al. (2007): BBC algorithm — first keyless jam-resistant broadcast scheme.
% NOTE(review): the abstract's opening sentence is truncated in this record
% (it begins mid-sentence with "has been made resistant...") — restore from the
% original paper if possible. Citation key "Iii_" looks like an auto-generated
% name-parse artifact; verify before relying on it elsewhere.
@booklet{Iii_keylessjam, title = {Keyless Jam Resistance}, author = {Leemon C. Baird and William L. Bahn and Michael D. Collins and Martin C. Carlisle and Sean C. Butler}, year = {2007}, abstract = {has been made resistant to jamming by the use of a secret key that is shared by the sender and receiver. There are no known methods for achieving jam resistance without that shared key. Unfortunately, wireless communication is now reaching a scale and a level of importance where such secret-key systems are becoming impractical. For example, the civilian side of the Global Positioning System (GPS) cannot use a shared secret, since that secret would have to be given to all 6.5 billion potential users, and so would no longer be secret. So civilian GPS cannot currently be protected from jamming. But the FAA has stated that the civilian airline industry will transition to using GPS for all navigational aids, even during landings. A terrorist with a simple jamming system could wreak havoc at a major airport. No existing system can solve this problem, and the problem itself has not even been widely discussed. The problem of keyless jam resistance is important. There is a great need for a system that can broadcast messages without any prior secret shared between the sender and receiver. We propose the first system for keyless jam resistance: the BBC algorithm. We describe the encoding, decoding, and broadcast algorithms. We then analyze it for expected resistance to jamming and error rates. We show that BBC can achieve the same level of jam resistance as traditional spread spectrum systems, at just under half the bit rate, and with no shared secret. Furthermore, a hybrid system can achieve the same average bit rate as traditional systems}, www_section = {GPS}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.91.8217}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8217.pdf}, }
% Laoutaris et al. (INFOCOM '07): selfish neighbor selection in overlay networks (EGOIST).
% NOTE(review): url was missing its scheme ("www.cs.bu.edu/...") — added http://
% so the link is usable; path and host unchanged.
@conference{Infocom2007-SNS, title = {Implications of Selfish Neighbor Selection in Overlay Networks}, author = {Nikolaos Laoutaris and Georgios Smaragdakis and Azer Bestavros and Byers, John W.}, booktitle = {Proceedings of IEEE INFOCOM 2007}, year = {2007}, month = {May}, address = {Anchorage, AK}, www_section = {EGOIST, game theory, routing}, url = {http://www.cs.bu.edu/techreports/pdf/2006-019-selfish-neighbor-selection.pdf}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2007-sns.pdf}, }
% Smaragdakis et al. (INFOCOM '08): BitTorrent-like swarming on optimized overlay
% graphs for n-way broadcast; Max-Min and Max-Sum neighbor-selection policies.
@conference{Infocom2008, title = {Swarming on Optimized Graphs for n-way Broadcast}, author = {Georgios Smaragdakis and Nikolaos Laoutaris and Pietro Michiardi and Azer Bestavros and Byers, John W. and Mema Roussopoulos}, booktitle = {Proceedings of IEEE INFOCOM 2008}, year = {2008}, month = {April}, address = {Phoenix, AZ}, abstract = {In an n-way broadcast application each one of n overlay nodes wants to push its own distinct large data file to all other n-1 destinations as well as download their respective data files. BitTorrent-like swarming protocols are ideal choices for handling such massive data volume transfers. The original BitTorrent targets one-to-many broadcasts of a single file to a very large number of receivers and thus, by necessity, employs an almost random overlay topology. n-way broadcast applications on the other hand, owing to their inherent n-squared nature, are realizable only in small to medium scale networks. In this paper, we show that we can leverage this scale constraint to construct optimized overlay topologies that take into consideration the end-to-end characteristics of the network and as a consequence deliver far superior performance compared to random and myopic (local) approaches. We present the Max-Min and Max- Sum peer-selection policies used by individual nodes to select their neighbors. The first one strives to maximize the available bandwidth to the slowest destination, while the second maximizes the aggregate output rate. We design a swarming protocol suitable for n-way broadcast and operate it on top of overlay graphs formed by nodes that employ Max-Min or Max-Sum policies. Using trace-driven simulation and measurements from a PlanetLab prototype implementation, we demonstrate that the performance of swarming on top of our constructed topologies is far superior to the performance of random and myopic overlays. 
Moreover, we show how to modify our swarming protocol to allow it to accommodate selfish nodes}, www_section = {EGOIST, game theory, routing}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2008.pdf}, }
% Irwin et al. (P2PECON '05): self-recharging virtual currency for market-based
% resource sharing (Cereus).
% NOTE(review): doi now holds the bare DOI (resolver link remains in url);
% inserted missing space in abstract ("starve.We" -> "starve. We").
@conference{Irwin:2005:SVC:1080192.1080194, title = {Self-recharging virtual currency}, author = {Irwin, David and Chase, Jeff and Grit, Laura and Yumerefendi, Aydan}, booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems}, organization = {ACM}, year = {2005}, month = {August}, address = {Philadelphia, Pennsylvania, USA}, pages = {93--98}, publisher = {ACM}, series = {P2PECON '05}, abstract = {Market-based control is attractive for networked computing utilities in which consumers compete for shared resources (computers, storage, network bandwidth). This paper proposes a new self-recharging virtual currency model as a common medium of exchange in a computational market. The key idea is to recycle currency through the economy automatically while bounding the rate of spending by consumers. Currency budgets may be distributed among consumers according to any global policy; consumers spend their budgets to schedule their resource usage through time, but cannot hoard their currency or starve. We outline the design and rationale for self-recharging currency in Cereus, a system for market-based community resource sharing, in which participants are authenticated and sanctions are sufficient to discourage fraudulent behavior. Currency transactions in Cereus are accountable: offline third-party audits can detect and prove cheating, so participants may transfer and recharge currency autonomously without involvement of the trusted banking service}, www_section = {market, virtual currency}, isbn = {1-59593-026-4}, doi = {10.1145/1080192.1080194}, url = {http://doi.acm.org/10.1145/1080192.1080194}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Self-recharging\%20virtual\%20currency.pdf}, }
% Isdal et al. (SIGCOMM CCR 40(4), 2010): OneSwarm privacy-preserving P2P data sharing.
% NOTE(review): expanded the truncated journal abbreviation ("SIGCOMM Comput.
% Commun. Rev" lacked even its final period) to the full journal name; styles
% may abbreviate, but cannot restore a lost full name.
@article{Isdal:2010:PPD:1851275.1851198, title = {Privacy-preserving P2P data sharing with OneSwarm}, author = {Isdal, Tomas and Piatek, Michael and Krishnamurthy, Arvind and Anderson, Thomas}, journal = {SIGCOMM Computer Communication Review}, volume = {40}, number = {4}, year = {2010}, address = {New York, NY, USA}, pages = {111--122}, publisher = {ACM}, www_section = {anonymity, OneSwarm, p2p network}, issn = {0146-4833}, doi = {10.1145/1851275.1851198}, url = {http://doi.acm.org/10.1145/1851275.1851198}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oneswarm_SIGCOMM.pdf}, }
@inproceedings{Jannotti:2000:ORM:1251229.1251243, title = {Overcast: reliable multicasting with an overlay network}, author = {Jannotti, John and Gifford, David K. and Johnson, Kirk L. and Frans M. Kaashoek and O'Toole Jr., James W.}, booktitle = {OSDI'00. Proceedings of the 4th conference on Symposium on Operating System Design \& Implementation}, organization = {USENIX Association}, year = {2000}, month = oct, address = {San Diego, California, USA}, pages = {14--14}, publisher = {USENIX Association}, series = {OSDI'00}, abstract = {Overcast is an application-level multicasting system that can be incrementally deployed using today's Internet infrastructure. These properties stem from Overcast's implementation as an overlay network. An overlay network consists of a collection of nodes placed at strategic locations in an existing network fabric. These nodes implement a network abstraction on top of the network provided by the underlying substrate network. Overcast provides scalable and reliable single-source multicast using a simple protocol for building efficient data distribution trees that adapt to changing network conditions. To support fast joins, Overcast implements a new protocol for efficiently tracking the global status of a changing distribution tree. Results based on simulations confirm that Overcast provides its added functionality while performing competitively with IP Multicast. Simulations indicate that Overcast quickly builds bandwidth-efficient distribution trees that, compared to IP Multicast, provide 70\%-100\% of the total bandwidth possible, at a cost of somewhat less than twice the network load. 
In addition, Overcast adapts quickly to changes caused by the addition of new nodes or the failure of existing nodes without causing undue load on the multicast source}, www_section = {overcast, overlay network}, url = {http://dl.acm.org/citation.cfm?id=1251229.1251243}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Overcast.pdf}, }
@article{Jelasity:2005:GAL:1082469.1082470, title = {Gossip-based aggregation in large dynamic networks}, author = {M{\'a}rk Jelasity and Alberto Montresor and Babaoglu, Ozalp}, journal = {ACM Transactions on Computer Systems}, volume = {23}, year = {2005}, month = aug, address = {New York, NY, USA}, pages = {219--252}, publisher = {ACM}, abstract = {As computer networks increase in size, become more heterogeneous and span greater geographic distances, applications must be designed to cope with the very large scale, poor reliability, and often, with the extreme dynamism of the underlying network. Aggregation is a key functional building block for such applications: it refers to a set of functions that provide components of a distributed system access to global information including network size, average load, average uptime, location and description of hotspots, and so on. Local access to global information is often very useful, if not indispensable for building applications that are robust and adaptive. For example, in an industrial control application, some aggregate value reaching a threshold may trigger the execution of certain actions; a distributed storage system will want to know the total available free space; load-balancing protocols may benefit from knowing the target average load so as to minimize the load they transfer. We propose a gossip-based protocol for computing aggregate values over network components in a fully decentralized fashion. The class of aggregate functions we can compute is very broad and includes many useful special cases such as counting, averages, sums, products, and extremal values. The protocol is suitable for extremely large and highly dynamic systems due to its proactive structure---all nodes receive the aggregate value continuously, thus being able to track any changes in the system. 
The protocol is also extremely lightweight, making it suitable for many distributed applications including peer-to-peer and grid computing systems. We demonstrate the efficiency and robustness of our gossip-based protocol both theoretically and experimentally under a variety of scenarios including node and communication failures}, www_section = {Gossip-based protocols, proactive aggregation}, issn = {0734-2071}, doi = {10.1145/1082469.1082470}, url = {http://doi.acm.org/10.1145/1082469.1082470}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jelasity\%2C\%20Montresor\%20\%26\%20Babaoglu\%20-\%20Gossip-based\%20aggregation.pdf}, }
@inproceedings{Jian:2008:WSP:1409540.1409546, title = {Why Share in Peer-to-Peer Networks?}, author = {Jian, Lian and MacKie-Mason, Jeffrey K.}, booktitle = {EC'08. Proceedings of the 10th International Conference on Electronic Commerce}, organization = {ACM}, year = {2008}, month = aug, address = {Innsbruck, Austria}, pages = {4:1--4:8}, publisher = {ACM}, series = {ICEC '08}, abstract = {Prior theory and empirical work emphasize the enormous free-riding problem facing peer-to-peer (P2P) sharing networks. Nonetheless, many P2P networks thrive. We explore two possible explanations that do not rely on altruism or explicit mechanisms imposed on the network: direct and indirect private incentives for the provision of public goods. The direct incentive is a traffic redistribution effect that advantages the sharing peer. We find this incentive is likely insufficient to motivate equilibrium content sharing in large networks. We then approach P2P networks as a graph-theoretic problem and present sufficient conditions for sharing and free-riding to co-exist due to indirect incentives we call generalized reciprocity}, www_section = {file-sharing, networks, P2P, peer-to-peer networking}, isbn = {978-1-60558-075-3}, doi = {10.1145/1409540.1409546}, url = {http://doi.acm.org/10.1145/1409540.1409546}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2708\%20-\%20Why\%20share\%20in\%20peer-to-peer\%20networks.pdf}, }
@incollection{Jiang_trustand, title = {Trust and Cooperation in Peer-to-Peer Systems}, author = {Junjie Jiang and Haihuan Bai and Weinong Wang}, booktitle = {Lecture Notes in Computer Science}, organization = {Springer Berlin / Heidelberg}, volume = {3032}, year = {2004}, pages = {371--378}, publisher = {Springer Berlin / Heidelberg}, abstract = {Most of the past studies on peer-to-peer systems have emphasized routing and lookup. The selfishness of users, which brings on the free riding problem, has not attracted sufficient attention from researchers. In this paper, we introduce a decentralized reputation-based trust model first, in which trust relationships could be built based on the reputation of peers. Subsequently, we use the iterated prisoner's dilemma to model the interactions in peer-to-peer systems and propose a simple incentive mechanism. By simulations, it's shown that the stable cooperation can emerge after limited rounds of interaction between peers by using the incentive mechanism}, www_section = {cooperation, incentives, iterated prisoner's dilemma, peer-to-peer networking}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jiang\%2C\%20Bai\%20\%26\%20Wang\%20-\%20Trust\%20and\%20Cooperation\%20in\%20Peer-to-Peer\%20Systems.pdf}, }
@inproceedings{Jun:2005:IBI:1080192.1080199, title = {Incentives in BitTorrent Induce Free Riding}, author = {Jun, Seung and Ahamad, Mustaque}, booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer Systems}, organization = {ACM}, year = {2005}, month = aug, address = {Philadelphia, Pennsylvania, USA}, pages = {116--121}, publisher = {ACM}, series = {P2PECON '05}, abstract = {We investigate the incentive mechanism of BitTorrent, which is a peer-to-peer file distribution system. As downloaders in BitTorrent are faced with the conflict between the eagerness to download and the unwillingness to upload, we relate this problem to the iterated prisoner's dilemma, which suggests guidelines to design a good incentive mechanism. Based on these guidelines, we propose a new, simple incentive mechanism. Our analysis and the experimental results using PlanetLab show that the original incentive mechanism of BitTorrent can induce free riding because it is not effective in rewarding and punishing downloaders properly. In contrast, a new mechanism proposed by us is shown to be more robust against free riders}, www_section = {BitTorrent, data dissemination, prisoner's dilemma, strategy}, isbn = {1-59593-026-4}, doi = {10.1145/1080192.1080199}, url = {http://doi.acm.org/10.1145/1080192.1080199}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Incentives\%20in\%20BitTorrent\%20induce\%20free\%20riding.pdf}, }
@inproceedings{Junges:2008:EPD:1402298.1402308, title = {Evaluating the performance of DCOP algorithms in a real world, dynamic problem}, author = {Junges, Robert and Bazzan, Ana L. C.}, booktitle = {AAMAS'08--Proceedings of the 7th international joint conference on Autonomous agents and multiagent systems}, organization = {International Foundation for Autonomous Agents and Multiagent Systems}, year = {2008}, month = may, address = {Estoril, Portugal}, pages = {599--606}, publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, series = {AAMAS '08}, abstract = {Complete algorithms have been proposed to solve problems modelled as distributed constraint optimization (DCOP). However, there are only few attempts to address real world scenarios using this formalism, mainly because of the complexity associated with those algorithms. In the present work we compare three complete algorithms for DCOP, aiming at studying how they perform in complex and dynamic scenarios of increasing sizes. In order to assess their performance we measure not only standard quantities such as number of cycles to arrive to a solution, size and quantity of exchanged messages, but also computing time and quality of the solution which is related to the particular domain we use. This study can shed light in the issues of how the algorithms perform when applied to problems other than those reported in the literature (graph coloring, meeting scheduling, and distributed sensor network)}, www_section = {coordination, DCOP, distributed constraint optimization, traffic control}, isbn = {978-0-9817381-1-6}, url = {http://dl.acm.org/citation.cfm?id=1402298.1402308}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS08\%20-\%20DCOP\%20algorithms\%20in\%20a\%20real\%20world\%20problem.pdf}, }
@article{Kaafar:2007:SIC:1282427.1282388, title = {Securing Internet Coordinate Embedding Systems}, author = {Kaafar, Mohamed Ali and Laurent Mathy and Barakat, Chadi and Salamatian, Kave and Turletti, Thierry and Dabbous, Walid}, journal = {SIGCOMM Computer Communication Review}, volume = {37}, year = {2007}, month = aug, address = {New York, NY, USA}, pages = {61--72}, publisher = {ACM}, abstract = {This paper addresses the issue of the security of Internet Coordinate Systems,by proposing a general method for malicious behavior detection during coordinate computations. We first show that the dynamics of a node, in a coordinate system without abnormal or malicious behavior, can be modeled by a Linear State Space model and tracked by a Kalman filter. Then we show, that the obtained model can be generalized in the sense that the parameters of a filtercalibrated at a node can be used effectively to model and predict the dynamic behavior at another node, as long as the two nodes are not too far apart in the network. This leads to the proposal of a Surveyor infrastructure: Surveyor nodes are trusted, honest nodes that use each other exclusively to position themselves in the coordinate space, and are therefore immune to malicious behavior in the system.During their own coordinate embedding, other nodes can thenuse the filter parameters of a nearby Surveyor as a representation of normal, clean system behavior to detect and filter out abnormal or malicious activity. 
A combination of simulations and PlanetLab experiments are used to demonstrate the validity, generality, and effectiveness of the proposed approach for two representative coordinate embedding systems, namely Vivaldi and NPS}, www_section = {internet coordinates-embedding systems, kalman filter, malicious behavior detection, network positioning systems, security}, issn = {0146-4833}, doi = {10.1145/1282427.1282388}, url = {http://doi.acm.org/10.1145/1282427.1282388}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Securing\%20Internet\%20Coordinate\%20Embedding\%20Systems.pdf}, }
@inproceedings{Kamvar:2003:EAR:775152.775242, title = {The EigenTrust algorithm for reputation management in P2P networks}, author = {Kamvar, Sepandar D. and Schlosser, Mario T. and Hector Garcia-Molina}, booktitle = {WWW'03. Proceedings of the 12th International Conference on World Wide Web}, organization = {ACM}, year = {2003}, month = may, address = {Budapest, Hungary}, pages = {640--651}, publisher = {ACM}, series = {WWW '03}, abstract = {Peer-to-peer file-sharing networks are currently receiving much attention as a means of sharing and distributing information. However, as recent experience shows, the anonymous, open nature of these networks offers an almost ideal environment for the spread of self-replicating inauthentic files.We describe an algorithm to decrease the number of downloads of inauthentic files in a peer-to-peer file-sharing network that assigns each peer a unique global trust value, based on the peer's history of uploads. We present a distributed and secure method to compute global trust values, based on Power iteration. By having peers use these global trust values to choose the peers from whom they download, the network effectively identifies malicious peers and isolates them from the network.In simulations, this reputation system, called EigenTrust, has been shown to significantly decrease the number of inauthentic files on the network, even under a variety of conditions where malicious peers cooperate in an attempt to deliberately subvert the system}, www_section = {distributed eigenvector computation, peer-to-peer networking, reputation}, isbn = {1-58113-680-3}, doi = {10.1145/775152.775242}, url = {http://doi.acm.org/10.1145/775152.775242}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WWW\%2703\%20-\%20The\%20EigenTrust\%20algorithm.pdf}, }
@inproceedings{Karlof02securerouting, title = {Secure Routing in Wireless Sensor Networks: Attacks and Countermeasures}, author = {Chris Karlof and David Wagner}, booktitle = {First IEEE International Workshop on Sensor Network Protocols and Applications}, year = {2002}, pages = {113--127}, abstract = {We consider routing security in wireless sensor networks. Many sensor network routing protocols have been proposed, but none of them have been designed with security as a goal. We propose security goals for routing in sensor networks, show how attacks against ad-hoc and peer-to-peer networks can be adapted into powerful attacks against sensor networks, introduce two classes of novel attacks against sensor networks --- sinkholes and HELLO floods, and analyze the security of all the major sensor network routing protocols. We describe crippling attacks against all of them and suggest countermeasures and design considerations. This is the first such analysis of secure routing in sensor networks}, www_section = {ad-hoc networks, P2P, sensor networks}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.4672}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sensor-route-security_0.pdf}, }
@inproceedings{Karnstedt2006SimilarityQueries, title = {Similarity Queries on Structured Data in Structured Overlays}, author = {Karnstedt, Marcel and Sattler, Kai-Uwe and Manfred Hauswirth and Roman Schmidt}, booktitle = {Proceedings of the 22nd International Conference on Data Engineering Workshops}, organization = {IEEE Computer Society}, year = {2006}, address = {Washington, DC, USA}, pages = {0--32}, publisher = {IEEE Computer Society}, series = {ICDEW '06}, isbn = {0-7695-2571-7}, doi = {10.1109/ICDEW.2006.137}, url = {http://dx.doi.org/10.1109/ICDEW.2006.137}, %%%%% ERROR: Missing field % www_section = {?????}, }
@inproceedings{Karp2004/ALGO, title = {Finite length analysis of LT codes}, author = {Richard Karp and Luby, Michael and M. Amin Shokrollahi}, booktitle = {Proceedings of the IEEE International Symposium on Information Theory, ISIT 2004}, year = {2004}, month = jan, pages = {0--39}, abstract = {This paper provides an efficient method for analyzing the error probability of the belief propagation (BP) decoder applied to LT Codes. Each output symbol is generated independently by sampling from a distribution and adding the input symbols corresponding to the support of the sampled vector}, www_section = {algoweb_ldpc}, isbn = {0-7695-1822-2}, doi = {10.1109/ISIT.2004.1365074}, url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1181950}, }
@article{Kermarrec2013, title = {Towards a Personalized Internet: a Case for a Full Decentralization}, author = {Kermarrec, Anne-Marie}, journal = {Philosophical Transactions. Series A, Mathematical, Physical, and Engineering Sciences}, volume = {371}, number = {1987}, year = {2013}, month = mar, abstract = {The Web has become a user-centric platform where users post, share, annotate, comment and forward content be it text, videos, pictures, URLs, etc. This social dimension creates tremendous new opportunities for information exchange over the Internet, as exemplified by the surprising and exponential growth of social networks and collaborative platforms. Yet, niche content is sometimes difficult to retrieve using traditional search engines because they target the mass rather than the individual. Likewise, relieving users from useless notification is tricky in a world where there is so much information and so little of interest for each and every one of us. We argue that ultra-specific content could be retrieved and disseminated should search and notification be personalized to fit this new setting. We also argue that users' interests should be implicitly captured by the system rather than relying on explicit classifications simply because the world is by nature unstructured, dynamic and users do not want to be hampered in their actions by a tight and static framework. In this paper, we review some existing personalization approaches, most of which are centralized. We then advocate the need for fully decentralized systems because personalization raises two main issues. Firstly, personalization requires information to be stored and maintained at a user granularity which can significantly hurt the scalability of a centralized solution. Secondly, at a time when the {\textquoteleft}big brother is watching you' attitude is prominent, users may be more and more reluctant to give away their personal data to the few large companies that can afford such personalization. 
We start by showing how to achieve personalization in decentralized systems and conclude with the research agenda ahead}, issn = {1364-503X}, doi = {10.1098/rsta.2012.0380}, www_section = {Unsorted}, url = {https://bibliography.gnunet.org}, }
@inproceedings{Khorshadi:2005:DPR:1090948.1091369, title = {Determining the Peer Resource Contributions in a P2P Contract}, author = {Khorshadi, Behrooz and Liu, Xin and Dipak Ghosal}, booktitle = {HOT-P2P 2005. Proceedings of the Second International Workshop on Hot Topics in Peer-to-Peer Systems}, organization = {IEEE Computer Society}, year = {2005}, month = jul, address = {La Jolla, California, USA}, pages = {2--9}, publisher = {IEEE Computer Society}, abstract = {In this paper we study a scheme called P2P contract which explicitly specifies the resource contributions that are required from the peers. In particular, we consider a P2P file sharing system in which when a peer downloads the file it is required to serve the file to upto N other peers within a maximum period of time T. We study the behavior of this contribution scheme in both centralized and decentralized P2P networks. In a centralized architecture, new requests are forwarded to a central server which hands out the contract along with a list of peers from where the file can be downloaded. We show that a simple fixed contract (i.e., fixed values of N and T) is sufficient to create the required server capacity which adapts to the load. Furthermore, we show that T, the time part of the contract is a more important control parameter than N. In the case of a decentralized P2P architecture, each new request is broadcast to a certain neighborhood determined by the time-to-live (TTL) parameter. Each server receiving the request independently doles out a contract and the requesting peer chooses the one which is least constraining. If there are no servers in the neighborhood, the request fails. To achieve a good request success ratio, we propose an adaptive scheme to set the contracts without requiring global information. 
Through both analysis and simulation, we show that the proposed scheme adapts to the load and achieves low request failure rate with high server efficiency}, www_section = {contracts, P2P, peer resource contribution, peer-to-peer networking}, isbn = {0-7695-2417-6}, doi = {10.1109/HOT-P2P.2005.9}, url = {http://dl.acm.org/citation.cfm?id=1090948.1091369}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HOT-P2P\%2705\%20-\%20Khorshadi\%2C\%20Liu\%20\%26\%20Ghosal.pdf}, }
@inproceedings{Kiran04totalrecall:, title = {Total Recall: System Support for Automated Availability Management}, author = {Ranjita Bhagwan and Kiran Tati and Yu-chung Cheng and Stefan Savage and Geoffrey M. Voelker}, booktitle = {NSDI'04. Proceedings of the 1st Symposium on Networked Systems Design and Implementation}, year = {2004}, pages = {337--350}, abstract = {Availability is a storage system property that is both highly desired and yet minimally engineered. While many systems provide mechanisms to improve availability--such as redundancy and failure recovery--how to best configure these mechanisms is typically left to the system manager. Unfortunately, few individuals have the skills to properly manage the trade-offs involved, let alone the time to adapt these decisions to changing conditions. Instead, most systems are configured statically and with only a cursory understanding of how the configuration will impact overall performance or availability. While this issue can be problematic even for individual storage arrays, it becomes increasingly important as systems are distributed--and absolutely critical for the wide-area peer-to-peer storage infrastructures being explored. This paper describes the motivation, architecture and implementation for a new peer-to-peer storage system, called TotalRecall, that automates the task of availability management. In particular, the TotalRecall system automatically measures and estimates the availability of its constituent host components, predicts their future availability based on past behavior, calculates the appropriate redundancy mechanisms and repair policies, and delivers user-specified availability while maximizing efficiency}, www_section = {P2P}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.9775}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/recall.pdf}, }
@inproceedings{Kleinberg:2004:NFD:982792.982803, title = {Network failure detection and graph connectivity}, author = {Kleinberg, Jon and Sandler, Mark and Slivkins, Aleksandrs}, booktitle = {SODA'04--Proceedings of the Fifteenth Annual ACM-SIAM Symposium on Discrete Algorithms}, organization = {Society for Industrial and Applied Mathematics}, year = {2004}, month = jan, address = {New Orleans, Louisiana}, pages = {76--85}, publisher = {Society for Industrial and Applied Mathematics}, series = {SODA '04}, abstract = {We consider a model for monitoring the connectivity of a network subject to node or edge failures. In particular, we are concerned with detecting ({\epsilon}, k)-failures: events in which an adversary deletes up to network elements (nodes or edges), after which there are two sets of nodes A and B, each at least an {\epsilon} fraction of the network, that are disconnected from one another. We say that a set D of nodes is an ({\epsilon} k)-detection set if, for any ({\epsilon} k)-failure of the network, some two nodes in D are no longer able to communicate; in this way, D "witnesses" any such failure. Recent results show that for any graph G, there is an is ({\epsilon} k)-detection set of size bounded by a polynomial in k and {\epsilon}, independent of the size of G.In this paper, we expose some relationships between bounds on detection sets and the edge-connectivity {\lambda} and node-connectivity {\kappa} of the underlying graph. Specifically, we show that detection set bounds can be made considerably stronger when parameterized by these connectivity values. We show that for an adversary that can delete {\kappa}{\lambda} edges, there is always a detection set of size O(({\kappa}/{\epsilon}) log (1/{\epsilon})) which can be found by random sampling. Moreover, an ({\epsilon}, \&lambda)-detection set of minimum size (which is at most 1/{\epsilon}) can be computed in polynomial time. 
A crucial point is that these bounds are independent not just of the size of G but also of the value of {\lambda}.Extending these bounds to node failures is much more challenging. The most technically difficult result of this paper is that a random sample of O(({\kappa}/{\epsilon}) log (1/{\epsilon})) nodes is a detection set for adversaries that can delete a number of nodes up to {\kappa}, the node-connectivity.For the case of edge-failures we use VC-dimension techniques and the cactus representation of all minimum edge-cuts of a graph; for node failures, we develop a novel approach for working with the much more complex set of all minimum node-cuts of a graph}, www_section = {failure detection, graph connectivity, network}, isbn = {0-89871-558-X}, url = {http://dl.acm.org/citation.cfm?id=982792.982803}, www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SODA\%2704\%20-\%20Network\%20failure\%20detection\%20and\%20graph\%20connectivity\%250A.pdf}, }
@conference{Klemm03aspecial-purpose, title = {A Special-Purpose Peer-to-Peer File Sharing System for Mobile Ad Hoc Networks}, author = {Alexander Klemm and Er Klemm and Christoph Lindemann and Oliver Waldhorst}, booktitle = {A Special-Purpose Peer-to-Peer File Sharing S