The GNUnet Bibliography: BibTeX

The GNUnet Bibliography | BibTeX records

By topic | By date | By author


10.1109/ICPP.2003.1240580

@article{10.1109/ICPP.2003.1240580,
  title = {{HIERAS}: A {DHT} Based Hierarchical {P2P} Routing Algorithm},
  author = {Zhiyong Xu and Rui Min and Yiming Hu},
  journal = {Parallel Processing, International Conference on},
  year = {2003},
  address = {Los Alamitos, CA, USA},
  pages = {0--187},
  publisher = {IEEE Computer Society},
  abstract = {Routing algorithm has great influence on system overall performance in
        Peer-to-Peer (P2P) applications. In current DHT based routing algorithms, routing
        tasks are distributed across all system peers. However, a routing hop could
        happen between two widely separated peers with high network link latency which
        greatly increases system routing overheads. In this paper, we propose a new P2P
        routing algorithm--- HIERAS to relieve this problem, it keeps scalability
        property of current DHT algorithms and improves system routing performance by the
        introduction of hierarchical structure. In HIERAS, we create several lower level
        P2P rings besides the highest level P2P ring. A P2P ring is a subset of the
        overall P2P overlay network. We create P2P rings in such a strategy that the
        average link latency between two peers in lower level rings is much smaller than
        higher level rings. Routing tasks are first executed in lower level rings before
        they go up to higher level rings, a large portion of routing hops previously
        executed in the global P2P ring are now replaced by hops in lower level rings,
        thus routing overheads can be reduced. The simulation results show HIERAS routing
        algorithm can significantly improve P2P system routing performance},
  www_section = {distributed hash table, P2P},
  issn = {0190-3918},
  doi = {10.1109/ICPP.2003.1240580},
  url = {http://www.computer.org/portal/web/csdl/doi/10.1109/ICPP.2003.1240580},
}
10.1109/MASCOT.2005.73
@article{10.1109/MASCOT.2005.73,
  title = {The Feasibility of {DHT}-based Streaming Multicast},
  author = {Stefan Birrer and Fabian E. Bustamante},
  journal = {2012 IEEE 20th International Symposium on Modeling, Analysis and Simulation of
        Computer and Telecommunication Systems},
  year = {2005},
  address = {Los Alamitos, CA, USA},
  pages = {288--298},
  publisher = {IEEE Computer Society},
  issn = {1526-7539},
  doi = {10.1109/MASCOT.2005.73},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SBirrer-dhtBasedMulticast_0.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
% NOTE(review): journal field names a "2012 ... 20th" symposium but year is 2005 (MASCOTS'05) -- venue title looks mangled by the export; verify against the publisher record.
10.1109/MDSO.2005.31
@article{10.1109/MDSO.2005.31,
  title = {Free Riding on {Gnutella} Revisited: The Bell Tolls?},
  author = {Daniel Hughes and Geoff Coulson and James Walkerdine},
  journal = {IEEE Distributed Systems Online},
  volume = {6},
  year = {2005},
  month = jun,
  address = {Los Alamitos, CA, USA},
  chapter = {1},
  publisher = {IEEE Computer Society},
  abstract = {Individuals who use peer-to-peer (P2P) file-sharing networks such as Gnutella
        face a social dilemma. They must decide whether to contribute to the common good
        by sharing files or to maximize their personal experience by free riding,
        downloading files while not contributing any to the network. Individuals gain no
        personal benefits from uploading files (in fact, it's inconvenient), so it's
        "rational" for users to free ride. However, significant numbers of free riders
        degrade the entire system's utility, creating a "tragedy of the digital commons."
        In this article, a new analysis of free riding on the Gnutella network updates
        data from 2000 and points to an increasing downgrade in the network's overall
        performance and the emergence of a "metatragedy" of the commons among Gnutella
        developers},
  www_section = {distributed systems, free riding, Gnutella, peer-to-peer networking},
  issn = {1541-4922},
  doi = {10.1109/MDSO.2005.31},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20DSO\%20-\%20Free\%20riding\%20on\%20Gnutella\%20revisited.pdf},
}
10.1109/MOBIQUITOUS.2005.29
@article{10.1109/MOBIQUITOUS.2005.29,
  title = {Exploiting co-location history for efficient service selection in ubiquitous
        computing systems},
  author = {Alexandros Karypidis and Spyros Lalis},
  journal = {Mobile and Ubiquitous Systems, Annual International Conference on},
  year = {2005},
  address = {Los Alamitos, CA, USA},
  pages = {202--212},
  publisher = {IEEE Computer Society},
  abstract = {As the ubiquitous computing vision materializes, the number and diversity of
        digital elements in our environment increases. Computing capability comes in
        various forms and is embedded in different physical objects, ranging from
        miniature devices such as human implants and tiny sensor particles, to large
        constructions such as vehicles and entire buildings. The number of possible
        interactions among such elements, some of which may be invisible or offer similar
        functionality, is growing fast so that it becomes increasingly hard to combine or
        select between them. Mechanisms are thus required for intelligent matchmaking
        that will achieve controlled system behavior, yet without requiring the user to
        continuously input desirable options in an explicit manner. In this paper we
        argue that information about the colocation relationship of computing elements is
        quite valuable in this respect and can be exploited to guide automated service
        selection with minimal or no user involvement. We also discuss the implementation
        of such mechanism that is part of our runtime system for smart objects},
  isbn = {0-7695-2375-7},
  doi = {10.1109/MOBIQUITOUS.2005.29},
  url = {http://www.computer.org/portal/web/csdl/doi/10.1109/MOBIQUITOUS.2005.29},
}
%%%%% ERROR: Missing field
% www_section = {?????},
10.1109/P2P.2001.990421
@article{10.1109/P2P.2001.990421,
  title = {Search in {JXTA} and Other Distributed Networks},
  author = {Sherif Botros and Steve Waterhouse},
  journal = {Peer-to-Peer Computing, IEEE International Conference on},
  year = {2001},
  address = {Los Alamitos, CA, USA},
  pages = {0--0030},
  publisher = {IEEE Computer Society},
  isbn = {0-7695-1503-7},
  doi = {10.1109/P2P.2001.990421},
  url = {https://bibliography.gnunet.org},
}
%%%%% ERROR: Missing field
% www_section = {?????},
10.1109/PERSER.2005.1506410
@article{10.1109/PERSER.2005.1506410,
  title = {Service discovery using volunteer nodes for pervasive environments},
  author = {Mijeom Kim and Mohan Kumar and Behrooz Shirazi},
  journal = {International Conference on Pervasive Services},
  year = {2005},
  address = {Los Alamitos, CA, USA},
  pages = {188--197},
  publisher = {IEEE Computer Society},
  abstract = {We propose a service discovery architecture called VSD (service discovery
        based on volunteers) for heterogeneous and dynamic pervasive computing
        environments. The proposed architecture uses a small subset of the nodes called
        volunteers that perform directory services. Relatively stable and capable nodes
        serve as volunteers, thus recognizing node heterogeneity in terms of mobility and
        capability. We discuss characteristics of VSD architecture and methods to improve
        connectivity among volunteers for higher discovery rate. By showing that VSD
        performs quite well compared to a broadcast based scheme in MANET scenarios, we
        validate that VSD is a flexible and adaptable architecture appropriate for
        dynamic pervasive computing environments. VSD incorporates several novel
        features: i) handles dynamism and supports self-reconfiguration; ii) provides
        physical locality and scalability; and iii) improves reliability and copes with
        uncertainty through redundancy by forming overlapped clusters},
  isbn = {0-7803-9032-6},
  doi = {10.1109/PERSER.2005.1506410},
  url = {http://www.computer.org/portal/web/csdl/doi/10.1109/PERSER.2005.1506410},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/31.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
10.1109/PTP.2003.1231513
@inproceedings{10.1109/PTP.2003.1231513,
  title = {Identity Crisis: Anonymity vs. Reputation in {P2P} Systems},
  author = {Marti, Sergio and Garcia-Molina, Hector},
  booktitle = {P2P'03. Proceedings of the 3rd International Conference on Peer-to-Peer
        Computing},
  organization = {IEEE Computer Society},
  year = {2003},
  month = sep,
  address = {Link{\"o}ping, Sweden},
  pages = {0--134},
  publisher = {IEEE Computer Society},
  abstract = {The effectiveness of reputation systems for peer-to-peer resource-sharing
        networks is largely dependent on the reliability of the identities used by peers
        in the network. Much debate has centered around how closely one's pseudoidentity
        in the network should be tied to their real-world identity, and how that identity
        is protected from malicious spoofing. In this paper we investigate the cost in
        efficiency of two solutions to the identity problem for peer-to-peer reputation
        systems. Our results show that, using some simple mechanisms, reputation systems
        can provide a factor of 4 to 20 improvement in performance over no reputation
        system, depending on the identity model used},
  www_section = {anonymity, identity, identity model, P2P, peer-to-peer networking,
        reliability, reputation, reputation system},
  isbn = {0-7695-2023-5},
  doi = {10.1109/PTP.2003.1231513},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Identity\%20crisis\%3A\%20anonymity\%20vs\%20reputation.pdf},
}
10.1109/SFCS.2002.1181950
@article{10.1109/SFCS.2002.1181950,
  title = {{LT} Codes},
  author = {Luby, Michael},
  journal = {Foundations of Computer Science, Annual IEEE Symposium on},
  year = {2002},
  address = {Los Alamitos, CA, USA},
  pages = {0--271},
  publisher = {IEEE Computer Society},
  abstract = {We introduce LT codes, the first rateless erasure codes that are very
        efficient as the data length grows},
  www_section = {coding theory},
  isbn = {0-7695-1822-2},
  issn = {0272-5428},
  doi = {10.1109/SFCS.2002.1181950},
  url = {http://www.computer.org/portal/web/csdl/abs/proceedings/focs/2002/1822/00/18220271abs.htm},
}
10.1109/SP.1980.10006
@article{10.1109/SP.1980.10006,
  author      = {Ralph C. Merkle},
  title       = {Protocols for Public Key Cryptosystems},
  journal     = {Security and Privacy, IEEE Symposium on},
  year        = {1980},
  address     = {Los Alamitos, CA, USA},
  pages       = {0--122},
  publisher   = {IEEE Computer Society},
  abstract    = {New Cryptographic protocols which take full advantage of the unique
        properties of public key cryptosystems are now evolving. Several protocols for
        public key distribution and for digital signatures are briefly compared with each
        other and with the conventional alternative},
  www_section = {Unsorted},
  issn        = {1540-7993},
  doi         = {10.1109/SP.1980.10006},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/SP.1980.10006},
}
10.1109/WOWMOM.2007.4351805
@article{10.1109/WOWMOM.2007.4351805,
  author      = {Nouha Oualha and Pietro Michiardi and Yves Roudier},
  title       = {A Game Theoretic Model of a Protocol for Data Possession Verification},
  journal     = {A World of Wireless, Mobile and Multimedia Networks, International Symposium
        on},
  year        = {2007},
  address     = {Los Alamitos, CA, USA},
  pages       = {1--6},
  publisher   = {IEEE Computer Society},
  abstract    = {This paper discusses how to model a protocol for the verification of data
        possession intended to secure a peer-to-peer storage application. The
        verification protocol is a primitive for storage assessment, and indirectly
        motivates nodes to behave cooperatively within the application. The capability of
        the protocol to enforce cooperation between a data holder and a data owner is
        proved theoretically by modeling the verification protocol as a Bayesian game,
        and demonstrating that the solution of the game is an equilibrium where both
        parties are cooperative},
  www_section = {P2P},
  isbn        = {978-1-4244-0992-1},
  doi         = {10.1109/WOWMOM.2007.4351805},
  url         = {http://www.computer.org/portal/web/csdl/doi/10.1109/WOWMOM.2007.4351805},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oualno-070618.pdf},
}
1007919
@inproceedings{1007919,
  title = {Simple efficient load balancing algorithms for peer-to-peer systems},
  author = {Karger, David and Ruhl, Matthias},
  booktitle = {SPAA '04: Proceedings of the sixteenth annual ACM symposium on Parallelism
        in algorithms and architectures},
  organization = {ACM},
  year = {2004},
  address = {New York, NY, USA},
  pages = {36--43},
  publisher = {ACM},
  abstract = {Load balancing is a critical issue for the efficient operation of
        peer-to-peer networks. We give two new load-balancing protocols whose provable
        performance guarantees are within a constant factor of optimal. Our protocols
        refine the consistent hashing data structure that underlies the Chord (and
        Koorde) P2P network. Both preserve Chord's logarithmic query time and
        near-optimal data migration cost.Consistent hashing is an instance of the
        distributed hash table (DHT) paradigm for assigning items to nodes in a
        peer-to-peer system: items and nodes are mapped to a common address space, and
        nodes have to store all items residing closeby in the address space.Our first
        protocol balances the distribution of the key address space to nodes, which
        yields a load-balanced system when the DHT maps items "randomly" into the address
        space. To our knowledge, this yields the first P2P scheme simultaneously
        achieving O(log n) degree, O(log n) look-up cost, and constant-factor load
        balance (previous schemes settled for any two of the three).Our second protocol
        aims to directly balance the distribution of items among the nodes. This is
        useful when the distribution of items in the address space cannot be randomized.
        We give a simple protocol that balances load by moving nodes to arbitrary
        locations "where they are needed." As an application, we use the last protocol to
        give an optimal implementation of a distributed data structure for range searches
        on ordered data},
  www_section = {load balancing, P2P},
  isbn = {1-58113-840-7},
  doi = {10.1145/1007912.1007919},
  url = {http://portal.acm.org/citation.cfm?id=1007919$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2405.pdf},
}
1013317
@inproceedings{1013317,
  title = {Energy-aware demand paging on {NAND} flash-based embedded storages},
  author = {Park, Chanik and Kang, Jeong-Uk and Park, Seon-Yeong and Kim, Jin-Soo},
  booktitle = {ISLPED '04: Proceedings of the 2004 international symposium on Low power
        electronics and design},
  organization = {ACM},
  year = {2004},
  address = {New York, NY, USA},
  pages = {338--343},
  publisher = {ACM},
  abstract = {The ever-increasing requirement for high-performance and huge-capacity
        memories of emerging embedded applications has led to the widespread adoption of
        SDRAM and NAND flash memory as main and secondary memories, respectively. In
        particular, the use of energy consuming memory, SDRAM, has become burdensome in
        battery-powered embedded systems. Intuitively, though demand paging can be used
        to mitigate the increasing requirement of main memory size, its applicability
        should be deliberately elaborated since NAND flash memory has asymmetric
        operation characteristics in terms of performance and energy consumption.In this
        paper, we present energy-aware demand paging technique to lower the energy
        consumption of embedded systems considering the characteristics of interactive
        embedded applications with large memory footprints. We also propose a flash
        memory-aware page replacement policy that can reduce the number of write and
        erase operations in NAND flash memory. With real-life workloads, we show the
        system-wide Energy{\textperiodcentered}Delay can be reduced by 15~30\% compared
        to the traditional shadowing architecture},
  isbn = {1-58113-929-2},
  doi = {10.1145/1013235.1013317},
  url = {http://doi.acm.org/10.1145/1013235.1013317},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2004-ISLPED-Energy-aware\%20demand\%20paging\%20on\%20NAND\%20flash-based\%20embedded\%20storages.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1015507
@article{1015507,
  author      = {Bharambe, Ashwin R. and Agrawal, Mukesh and Seshan, Srinivasan},
  title       = {Mercury: supporting scalable multi-attribute range queries},
  journal     = {SIGCOMM Comput. Commun. Rev},
  volume      = {34},
  number      = {4},
  year        = {2004},
  address     = {New York, NY, USA},
  pages       = {353--366},
  publisher   = {ACM},
  abstract    = {This paper presents the design of Mercury, a scalable protocol for supporting
        multi-attribute range-based searches. Mercury differs from previous range-based
        query systems in that it supports multiple attributes as well as performs
        explicit load balancing. To guarantee efficient routing and load balancing,
        Mercury uses novel light-weight sampling mechanisms for uniformly sampling random
        nodes in a highly dynamic overlay network. Our evaluation shows that Mercury is
        able to achieve its goals of logarithmic-hop routing and near-uniform load
        balancing.We also show that Mercury can be used to solve a key problem for an
        important class of distributed applications: distributed state maintenance for
        distributed games. We show that the Mercury-based solution is easy to use, and
        that it reduces the game's messaging overheard significantly compared to a
        na{\"\i}ve approach},
  www_section = {distributed hash table, load balancing, mercury, P2P, random sampling,
        range queries},
  issn        = {0146-4833},
  doi         = {10.1145/1030194.1015507},
  url         = {http://portal.acm.org/citation.cfm?id=1030194.1015507$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p625-bharambe1.pdf},
}
1021938
@inproceedings{1021938,
  title = {Erasure Code Replication Revisited},
  author = {Lin, W. K. and Chiu, Dah Ming and Lee, Y. B.},
  booktitle = {P2P '04: Proceedings of the Fourth International Conference on Peer-to-Peer
        Computing},
  organization = {IEEE Computer Society},
  year = {2004},
  address = {Washington, DC, USA},
  pages = {90--97},
  publisher = {IEEE Computer Society},
  abstract = {Erasure coding is a technique for achieving high availability and reliability
        in storage and communication systems. In this paper, we revisit the analysis of
        erasure code replication and point out some situations when whole-file
        replication is preferred. The switchover point (from preferring whole-file
        replication to erasure code replication) is studied, and characterized using
        asymptotic analysis. We also discuss the additional considerations in building
        erasure code replication systems},
  isbn = {0-7695-2156-8},
  doi = {10.1109/P2P.2004.17},
  url = {http://portal.acm.org/citation.cfm?id=1021938$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.109.2034.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1026492
@article{1026492,
  author      = {Avizienis, Algirdas and Laprie, Jean-Claude and Randell, Brian and Carl
        Landwehr},
  title       = {Basic Concepts and Taxonomy of Dependable and Secure Computing},
  journal     = {IEEE Trans. Dependable Secur. Comput},
  volume      = {1},
  number      = {1},
  year        = {2004},
  address     = {Los Alamitos, CA, USA},
  pages       = {11--33},
  publisher   = {IEEE Computer Society Press},
  abstract    = {This paper gives the main definitions relating to dependability, a generic
        concept including as special case such attributes as reliability, availability,
        safety, integrity, maintainability, etc. Security brings in concerns for
        confidentiality, in addition to availability and integrity. Basic definitions are
        given first. They are then commented upon, and supplemented by additional
        definitions, which address the threats to dependability and security (faults,
        errors, failures), their attributes, and the means for their achievement (fault
        prevention, fault tolerance, fault removal, fault forecasting). The aim is to
        explicate a set of general concepts, of relevance across a wide range of
        situations and, therefore, helping communication and cooperation among a number
        of scientific and technical communities, including ones that are concentrating on
        particular types of system, of system failures, or of causes of system failures},
  www_section = {attack, fault removal, fault-tolerance, index terms-dependability, trust,
        vulnerability},
  issn        = {1545-5971},
  doi         = {10.1109/TDSC.2004.2},
  url         = {http://portal.acm.org/citation.cfm?id=1026488.1026492$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2793.pdf},
}
1038318
@inproceedings{1038318,
  title = {Vulnerabilities and Security Threats in Structured Overlay Networks: A
        Quantitative Analysis},
  author = {Srivatsa, Mudhakar and Liu, Ling},
  booktitle = {ACSAC '04: Proceedings of the 20th Annual Computer Security Applications
        Conference},
  organization = {IEEE Computer Society},
  year = {2004},
  address = {Washington, DC, USA},
  pages = {252--261},
  publisher = {IEEE Computer Society},
  abstract = {A number of recent applications have been built on distributed hash tables
        (DHTs) based overlay networks. Almost all DHT-based schemes employ a tight
        deterministic data placement and ID mapping schemes. This feature on one hand
        provides assurance on location of data if it exists, within a bounded number of
        hops, and on the other hand, opens doors for malicious nodes to lodge attacks
        that can potentially thwart the functionality of the overlay network. This paper
        studies several serious security threats in DHT-based systems through two
        targeted attacks at the overlay network's protocol layer. The first attack
        explores the routing anomalies that can be caused by malicious nodes returning
        incorrect lookup routes. The second attack targets the ID mapping scheme. We
        disclose that the malicious nodes can target any specific data item in the
        system; and corrupt/modify the data item to its favor. For each of these attacks,
        we provide quantitative analysis to estimate the extent of damage that can be
        caused by the attack; followed by experimental validation and defenses to guard
        the overlay networks from such attacks},
  www_section = {distributed hash table, overlay networks, P2P},
  isbn = {0-7695-2252-1},
  doi = {10.1109/CSAC.2004.50},
  url = {http://portal.acm.org/citation.cfm?id=1038254.1038318$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.1198.pdf},
}
1039861
@inproceedings{1039861,
  title = {Burt: The Backup and Recovery Tool},
  author = {Melski, Eric},
  booktitle = {LISA '99: Proceedings of the 13th USENIX conference on System
        administration},
  organization = {USENIX Association},
  year = {1999},
  address = {Berkeley, CA, USA},
  pages = {207--218},
  publisher = {USENIX Association},
  abstract = {Burt is a freely distributed parallel network backup system written at the
        University of Wisconsin, Madison. It is designed to backup large heterogeneous
        networks. It uses the Tcl scripting language and standard backup programs like
        dump(1) and GNUTar to enable backups of a wide variety of data sources, including
        UNIX and Windows NT workstations, AFS based storage, and others. It also uses Tcl
        for the creation of the user interface, giving the system administrator great
        flexibility in customizing the system. Burt supports parallel backups to ensure
        high backup speeds, and checksums to ensure data integrity. The principal
        contribution of Burt is that it provides a powerful I/O engine within the context
        of a flexible scripting language; this combination enables graceful solutions to
        many problems associated with backups of large installations. At our site, we use
        Burt to backup data from 350 workstations and from our AFS servers, a total of
        approximately 900 GB every two weeks},
  www_section = {backup},
  url = {http://portal.acm.org/citation.cfm?id=1039861$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.112.7612.pdf},
}
1042380
@article{1042380,
  author      = {Yunhao Liu and Xiao, Li and Liu, Xiaomei and Ni, Lionel M. and Zhang,
        Xiaodong},
  title       = {Location Awareness in Unstructured Peer-to-Peer Systems},
  journal     = {IEEE Trans. Parallel Distrib. Syst},
  volume      = {16},
  number      = {2},
  year        = {2005},
  address     = {Piscataway, NJ, USA},
  pages       = {163--174},
  publisher   = {IEEE Press},
  abstract    = {Peer-to-Peer (P2P) computing has emerged as a popular model aiming at further
        utilizing Internet information and resources. However, the mechanism of peers
        randomly choosing logical neighbors without any knowledge about underlying
        physical topology can cause a serious topology mismatch between the P2P overlay
        network and the physical underlying network. The topology mismatch problem brings
        great stress in the Internet infrastructure. It greatly limits the performance
        gain from various search or routing techniques. Meanwhile, due to the inefficient
        overlay topology, the flooding-based search mechanisms cause a large volume of
        unnecessary traffic. Aiming at alleviating the mismatching problem and reducing
        the unnecessary traffic, we propose a location-aware topology matching (LTM)
        technique. LTM builds an efficient overlay by disconnecting slow connections and
        choosing physically closer nodes as logical neighbors while still retaining the
        search scope and reducing response time for queries. LTM is scalable and
        completely distributed in the sense that it does not require any global knowledge
        of the whole overlay network. The effectiveness of LTM is demonstrated through
        simulation studies},
  www_section = {flooding attacks, location-aware topology, P2P, search efficiency,
        topology matching},
  issn        = {1045-9219},
  doi         = {10.1109/TPDS.2005.21},
  url         = {http://portal.acm.org/citation.cfm?id=1042380$\#$},
}
1049775
@inproceedings{1049775,
  title = {Scalable Service Discovery for {MANET}},
  author = {Sailhan, Francoise and Issarny, Valerie},
  booktitle = {PERCOM '05: Proceedings of the Third IEEE International Conference on
        Pervasive Computing and Communications},
  organization = {IEEE Computer Society},
  year = {2005},
  address = {Washington, DC, USA},
  pages = {235--244},
  publisher = {IEEE Computer Society},
  abstract = {Mobile Ad hoc NETworks (MANETs) conveniently complement infrastructure-based
        networks, allowing mobile nodes to spontaneously form a network and share their
        services, including bridging with other networks, either infrastructure-based or
        ad hoc. However, distributed service provisioning over MANETs requires adequate
        support for service discovery and invocation, due to the network{\'y}s dynamics
        and resource constraints of wireless nodes. While a number of existing service
        discovery protocols have shown to be effective for the wireless environment,
        these are mainly aimed at infrastructure-based and/or 1-hop ad hoc wireless
        networks. Some discovery protocols for MANETs have been proposed over the last
        couple of years but they induce significant traffic overhead, and are thus
        primarily suited for small-scale MANETs with few nodes. Building upon the
        evaluation of existing protocols, we introduce a scalable service discovery
        protocol for MANETs, which is based on the homogeneous and dynamic deployment of
        cooperating directories within the network. Scalability of our protocol comes
        from the minimization of the generatedtraffic, and the use of compact directory
        summaries that enable to efficiently locate the directory that most likely caches
        the description of a given service},
  www_section = {ad-hoc networks, mobile Ad-hoc networks},
  isbn = {0-7695-2299-8},
  doi = {10.1109/PERCOM.2005.36},
  url = {http://portal.acm.org/citation.cfm?id=1049775$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.73.7247.pdf},
}
1064217
@article{1064217,
  author      = {Leonard, Derek and Rai, Vivek and Loguinov, Dmitri},
  title       = {On lifetime-based node failure and stochastic resilience of decentralized
        peer-to-peer networks},
  journal     = {SIGMETRICS Perform. Eval. Rev},
  volume      = {33},
  number      = {1},
  year        = {2005},
  address     = {New York, NY, USA},
  pages       = {26--37},
  publisher   = {ACM},
  abstract    = {To understand how high rates of churn and random departure decisions of
        end-users affect connectivity of P2P networks, this paper investigates resilience
        of random graphs to lifetime-based node failure and derives the expected delay
        before a user is forcefully isolated from the graph and the probability that this
        occurs within his/her lifetime. Our results indicate that systems with
        heavy-tailed lifetime distributions are more resilient than those with
        light-tailed (e.g., exponential) distributions and that for a given average
        degree, k-regular graphs exhibit the highest resilience. As a practical
        illustration of our results, each user in a system with n = 100 billion peers,
        30-minute average lifetime, and 1-minute node-replacement delay can stay
        connected to the graph with probability 1--1 n using only 9 neighbors. This is in
        contrast to 37 neighbors required under previous modeling efforts. We finish the
        paper by showing that many P2P networks are almost surely (i.e., with probability
        1-o(1)) connected if they have no isolated nodes and derive a simple model for
        the probability that a P2P system partitions under churn},
  www_section = {P2P, pareto, stochastic lifetime resilience},
  issn        = {0163-5999},
  doi         = {10.1145/1071690.1064217},
  url         = {http://portal.acm.org/citation.cfm?id=1071690.1064217$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.5920.pdf},
}
1076
@article{1076,
  title = {New directions in cryptography},
  author = {Whitfield Diffie and Martin E. Hellman},
  journal = {IEEE Transactions on Information Theory},
  volume = {22},
  year = {1976},
  month = nov,
  pages = {644--654},
  abstract = {Two kinds of contemporary developments in cryptography are examined. Widening
        applications of teleprocessing have given rise to a need for new types of
        cryptographic systems, which minimize the need for secure key distribution
        channels and supply the equivalent of a written signature. This paper suggests
        ways to solve these currently open problems. It also discusses how the theories
        of communication and computation are beginning to provide the tools to solve
        cryptographic problems of long standing},
  www_section = {cryptographic systems, cryptography},
  issn = {0018-9448},
  doi = {10.1109/TIT.1976.1055638},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Trans.\%20on\%20Info.\%20-\%20New\%20directions\%20in\%20cryptography.pdf},
  url = {https://bibliography.gnunet.org},
}
1080833
@inproceedings{1080833,
  title = {Architecture and evaluation of an unplanned 802.11b mesh network},
  author = {Bicket, John and Aguayo, Daniel and Biswas, Sanjit and Robert Morris},
  booktitle = {MobiCom '05: Proceedings of the 11th annual international conference on
        Mobile computing and networking},
  organization = {ACM},
  year = {2005},
  address = {New York, NY, USA},
  pages = {31--42},
  publisher = {ACM},
  abstract = {This paper evaluates the ability of a wireless mesh architecture to provide
        high performance Internet access while demanding little deployment planning or
        operational management. The architecture considered in this paper has unplanned
        node placement (rather than planned topology), omni-directional antennas (rather
        than directional links), and multi-hop routing (rather than single-hop base
        stations). These design decisions contribute to ease of deployment, an important
        requirement for community wireless networks. However, this architecture carries
        the risk that lack of planning might render the network's performance unusably
        low. For example, it might be necessary to place nodes carefully to ensure
        connectivity; the omni-directional antennas might provide uselessly short radio
        ranges; or the inefficiency of multi-hop forwarding might leave some users
        effectively disconnected.The paper evaluates this unplanned mesh architecture
        with a case study of the Roofnet 802.11b mesh network. Roofnet consists of 37
        nodes spread over four square kilometers of an urban area. The network provides
        users with usable performance despite lack of planning: the average inter-node
        throughput is 627 kbits/second, even though the average route has three hops.The
        paper evaluates multiple aspects of the architecture: the effect of node density
        on connectivity and throughput; the characteristics of the links that the routing
        protocol elects to use; the usefulness of the highly connected mesh afforded by
        omni-directional antennas for robustness and throughput; and the potential
        performance of a single-hop network using the same nodes as Roofnet},
  www_section = {ad-hoc networks, mesh networks, multi-hop networks, route metrics,
        wireless routing},
  isbn = {1-59593-020-5},
  doi = {10.1145/1080829.1080833},
  url = {http://portal.acm.org/citation.cfm?id=1080833$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.62.3119.pdf},
}
1090700
@inproceedings{1090700,
  title = {Metadata Efficiency in Versioning File Systems},
  author = {Soules, Craig A. N. and Goodson, Garth R. and Strunk, John D. and Ganger,
        Gregory R.},
  booktitle = {FAST '03: Proceedings of the 2nd USENIX Conference on File and Storage
        Technologies},
  organization = {USENIX Association},
  year = {2003},
  address = {Berkeley, CA, USA},
  pages = {43--58},
  publisher = {USENIX Association},
  abstract = {Versioning file systems retain earlier versions of modified files, allowing
        recovery from user mistakes or system corruption. Unfortunately, conventional
        versioning systems do not efficiently record large numbers of versions. In
        particular, versioned metadata can consume as much space as versioned data. This
        paper examines two space-efficient metadata structures for versioning file
        systems and describes their integration into the Comprehensive Versioning File
        System (CVFS), which keeps all versions of all files. Journal-based metadata
        encodes each metadata version into a single journal entry; CVFS uses this
        structure for inodes and indirect blocks, reducing the associated space
        requirements by 80\%. Multiversion b-trees extend each entrys key with a
        timestamp and keep current and historical entries in a single tree; CVFS uses
        this structure for directories, reducing the associated space requirements by
        99\%. Similar space reductions are predicted via trace analysis for other
        versioning strategies (e.g., on-close versioning). Experiments with CVFS verify
        that its current-version performance is sim-ilar to that of non-versioning file
        systems while reducing overall space needed for history data by a factor of two.
        Although access to historical versions is slower than con-ventional versioning
        systems, checkpointing is shown to mitigate and bound this effect},
  www_section = {file systems},
  url = {http://portal.acm.org/citation.cfm?id=1090694.1090700$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fast03.pdf},
}
1095816
@article{1095816,
  title = {BAR fault tolerance for cooperative services},
  author = {Aiyer, Amitanand S. and Lorenzo Alvisi and Clement, Allen and Dahlin, Mike and
        Martin, Jean-Philippe and Porth, Carl},
  journal = {SIGOPS Operating Systems Review},
  volume = {39},
  number = {5},
  year = {2005},
  address = {New York, NY, USA},
  pages = {45--58},
  publisher = {ACM},
  abstract = {This paper describes a general approach to constructing cooperative services
        that span multiple administrative domains. In such environments, protocols must
        tolerate both Byzantine behaviors when broken, misconfigured, or malicious nodes
        arbitrarily deviate from their specification and rational behaviors when selfish
        nodes deviate from their specification to increase their local benefit. The paper
        makes three contributions: (1) It introduces the BAR (Byzantine, Altruistic,
        Rational) model as a foundation for reasoning about cooperative services; (2) It
        proposes a general three-level architecture to reduce the complexity of building
        services under the BAR model; and (3) It describes an implementation of BAR-B the
        first cooperative backup service to tolerate both Byzantine users and an
        unbounded number of rational users. At the core of BAR-B is an asynchronous
        replicated state machine that provides the customary safety and liveness
        guarantees despite nodes exhibiting both Byzantine and rational behaviors. Our
        prototype provides acceptable performance for our application: our BAR-tolerant
        state machine executes 15 requests per second, and our BAR-B backup service can
        back up 100MB of data in under 4 minutes},
  www_section = {byzantine fault tolerance, game theory, reliability},
  issn = {0163-5980},
  doi = {10.1145/1095809.1095816},
  url = {http://portal.acm.org/citation.cfm?id=1095816$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.80.713.pdf},
}
1095944
@inproceedings{1095944,
  title = {Impacts of packet scheduling and packet loss distribution on FEC Performances:
        observations and recommendations},
  author = {Christoph Neumann and Aur{\'e}lien Francillon and David Furodet},
  booktitle = {CoNEXT'05: Proceedings of the 2005 ACM conference on Emerging network
        experiment and technology},
  organization = {ACM Press},
  year = {2005},
  address = {New York, NY, USA},
  pages = {166--176},
  publisher = {ACM Press},
  abstract = {Forward Error Correction (FEC) is commonly used for content broadcasting. The
        performance of the FEC codes largely vary, depending in particular on the code
        used and on the object size, and these parameters have already been studied in
        detail by the community. However the FEC performances are also largely dependent
        on the packet scheduling used during transmission and on the loss pattern
        introduced by the channel. Little attention has been devoted to these aspects so
        far. Therefore the present paper analyzes their impacts on the three FEC codes:
        LDGM Staircase, LDGM Triangle, two large block codes, and Reed-Solomon. Thanks to
        this analysis, we define several recommendations on how to best use these codes,
        depending on the test case and on the channel, which turns out to be of utmost
        importance},
  www_section = {forward error correction, LDPC, loss pattern, multicast, packet
        scheduling, Reed-Solomon},
  isbn = {1-59593-197-X},
  doi = {10.1145/1095921.1095944},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.63.8807},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RR-5578.pdf},
}
1096703
@inproceedings{1096703,
  title = {Integrating Portable and Distributed Storage},
  author = {Niraj Tolia and Harkes, Jan and Michael Kozuch and Satyanarayanan, Mahadev},
  booktitle = {FAST '04: Proceedings of the 3rd USENIX Conference on File and Storage
        Technologies},
  organization = {USENIX Association},
  year = {2004},
  address = {Berkeley, CA, USA},
  pages = {227--238},
  publisher = {USENIX Association},
  abstract = {We describe a technique called lookaside caching that combines the strengths
        of distributed file systems and portable storage devices, while negating their
        weaknesses. In spite of its simplicity, this technique proves to be powerful and
        versatile. By unifying distributed storage and portable storage into a single
        abstraction, lookaside caching allows users to treat devices they carry as merely
        performance and availability assists for distant file servers. Careless use of
        portable storage has no catastrophic consequences. Experimental results show that
        significant performance improvements are possible even in the presence of stale
        data on the portable device},
  www_section = {caching proxies, distributed database},
  url = {http://portal.acm.org/citation.cfm?id=1096703$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/integratingpds-fast04.pdf},
}
1103797
@inproceedings{1103797,
  title = {Hydra: a platform for survivable and secure data storage systems},
  author = {Lihao Xu},
  booktitle = {StorageSS '05: Proceedings of the 2005 ACM workshop on Storage security and
        survivability},
  organization = {ACM},
  year = {2005},
  address = {New York, NY, USA},
  pages = {108--114},
  publisher = {ACM},
  abstract = {This paper introduces Hydra, a platform that we are developing for highly
        survivable and secure data storage systems that distribute information over
        networks and adapt timely to environment changes, enabling users to store and
        access critical data in a continuously available and highly trustable fashion.
        The Hydra platform uses MDS array codes that can be encoded and decoded
        efficiently for distributing and recovering user data. Novel uses of MDS array
        codes in Hydra are discussed, as well as Hydra's design goals, general structures
        and a set of basic operations on user data. We also explore Hydra's applications
        in survivable and secure data storage systems},
  www_section = {storage},
  isbn = {1-59593-233-X},
  doi = {10.1145/1103780.1103797},
  url = {http://portal.acm.org/citation.cfm?id=1103797$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/w8paper13.pdf},
}
1108067
@inproceedings{1108067,
  title = {Boundary Chord: A Novel Peer-to-Peer Algorithm for Replica Location Mechanism in
        Grid Environment},
  author = {Jin, Hai and Wang, Chengwei and Chen, Hanhua},
  booktitle = {ISPAN '05: Proceedings of the 8th International Symposium on Parallel
        Architectures,Algorithms and Networks},
  organization = {IEEE Computer Society},
  year = {2005},
  address = {Washington, DC, USA},
  pages = {262--267},
  publisher = {IEEE Computer Society},
  abstract = {The emerging grids need an efficient replica location mechanism. In the
        experience of developing 1 ChinaGrid Supporting Platform (CGSP), a grid
        middleware that builds a uniform platform supporting multiple grid-based
        applications, we meet a challenge of utilizing the properties of locality in
        replica location process to construct a practical and high performance replica
        location mechanism. The key of the solution to this challenge is to design an
        efficient replica location algorithm that meets above requirements. Some previous
        works have been done to build a replica location mechanism, but they are not
        suitable for replica location in a grid environment with multiple applications
        like ChinaGrid. In this paper, we present a novel peer-to-peer algorithm for
        replica location mechanism, Boundary Chord, which has the merits of locality
        awareness, self-organization, and load balancing. Simulation results show that
        the algorithm has better performance than other structured peer-to-peer solutions
        to the replica location problem},
  isbn = {0-7695-2509-1},
  doi = {10.1109/ISPAN.2005.21},
  url = {http://portal.acm.org/citation.cfm?id=1108067$\#$},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1109601
@inproceedings{1109601,
  title = {The rainbow skip graph: a fault-tolerant constant-degree distributed data
        structure},
  author = {Goodrich, Michael T. and Nelson, Michael J. and Sun, Jonathan Z.},
  booktitle = {SODA '06: Proceedings of the seventeenth annual ACM-SIAM symposium on
        Discrete algorithm},
  organization = {ACM},
  year = {2006},
  address = {New York, NY, USA},
  pages = {384--393},
  publisher = {ACM},
  abstract = {We present a distributed data structure, which we call the rainbow skip
        graph. To our knowledge, this is the first peer-to-peer data structure that
        simultaneously achieves high fault-tolerance, constant-sized nodes, and fast
        update and query times for ordered data. It is a non-trivial adaptation of the
        SkipNet/skip-graph structures of Harvey et al. and Aspnes and Shah, so as to
        provide fault-tolerance as these structures do, but to do so using constant-sized
        nodes, as in the family tree structure of Zatloukal and Harvey. It supports
        successor queries on a set of n items using O(log n) messages with high
        probability, an improvement over the expected O(log n) messages of the family
        tree. Our structure achieves these results by using the following new
        constructs:{\textbullet} Rainbow connections: parallel sets of pointers between
        related components of nodes, so as to achieve good connectivity between
        "adjacent" components, using constant-sized nodes.{\textbullet} Hydra components:
        highly-connected, highly fault-tolerant components of constant-sized nodes, which
        will contain relatively large connected subcomponents even under the failure of a
        constant fraction of the nodes in the component.We further augment the hydra
        components in the rainbow skip graph by using erasure-resilient codes to ensure
        that any large subcomponent of nodes in a hydra component is sufficient to
        reconstruct all the data stored in that component. By carefully maintaining the
        size of related components and hydra components to be O(log n), we are able to
        achieve fast times for updates and queries in the rainbow skip graph. In
        addition, we show how to make the communication complexity for updates and
        queries be worst case, at the expense of more conceptual complexity and a slight
        degradation in the node congestion of the data structure},
  www_section = {distributed hash table, Hydra, rainbow, RSG, skip graph, SkipNet},
  isbn = {0-89871-605-5},
  doi = {10.1145/1109557.1109601},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rainbow.pdf},
}
1111777
@inproceedings{1111777,
  title = {Data durability in peer to peer storage systems},
  author = {Gil Utard and Antoine Vernois},
  booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on Cluster
        Computing and the Grid},
  organization = {IEEE Computer Society},
  year = {2004},
  address = {Washington, DC, USA},
  pages = {90--97},
  publisher = {IEEE Computer Society},
  abstract = {In this paper we present a quantitative study of data survival in peer to
        peer storage systems. We first recall two main redundancy mechanisms: replication
        and erasure codes, which are used by most peer to peer storage systems like
        OceanStore, PAST or CFS, to guarantee data durability. Second we characterize
        peer to peer systems according to a volatility factor (a peer is free to leave
        the system at anytime) and to an availability factor (a peer is not permanently
        connected to the system). Third we model the behavior of a system as a Markov
        chain and analyse the average life time of data (MTTF) according to the
        volatility and availability factors. We also present the cost of the repair
        process based on these redundancy schemes to recover failed peers. The conclusion
        of this study is that when there is no high availability of peers, a simple
        replication scheme may be more efficient than sophisticated erasure codes},
  www_section = {P2P, redundancy, storage},
  isbn = {0-7803-8430-X},
  url = {http://portal.acm.org/citation.cfm?id=1111777$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.9992.pdf},
}
1128335
@inproceedings{1128335,
  title = {OmniStore: A system for ubiquitous personal storage management},
  author = {Alexandros Karypidis and Spyros Lalis},
  booktitle = {PERCOM '06: Proceedings of the Fourth Annual IEEE International Conference
        on Pervasive Computing and Communications},
  organization = {IEEE Computer Society},
  year = {2006},
  address = {Washington, DC, USA},
  pages = {136--147},
  publisher = {IEEE Computer Society},
  abstract = {As personal area networking becomes a reality, the collective management of
        storage in portable devices such as mobile phones, cameras and music players will
        grow in importance. The increasing wireless communication capability of such
        devices makes it possible for them to interact with each other and implement more
        advanced storage functionality. This paper introduces OmniStore, a system which
        employs a unified data management approach that integrates portable and backend
        storage, but also exhibits self-organizing behavior through spontaneous device
        collaboration},
  isbn = {0-7695-2518-0},
  doi = {10.1109/PERCOM.2006.40},
  url = {http://portal.acm.org/citation.cfm?id=1128335$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.96.4283.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1133613
@inproceedings{1133613,
  title = {Defending against eclipse attacks on overlay networks},
  author = {Singh, Atul and Miguel Castro and Peter Druschel and Antony Rowstron},
  booktitle = {EW 11: Proceedings of the 11th workshop on ACM SIGOPS European workshop},
  organization = {ACM},
  year = {2004},
  address = {New York, NY, USA},
  pages = {0--21},
  publisher = {ACM},
  abstract = {Overlay networks are widely used to deploy functionality at edge nodes
        without changing network routers. Each node in an overlay network maintains
        pointers to a set of neighbor nodes. These pointers are used both to maintain the
        overlay and to implement application functionality, for example, to locate
        content stored by overlay nodes. If an attacker controls a large fraction of the
        neighbors of correct nodes, it can "eclipse" correct nodes and prevent correct
        overlay operation. This Eclipse attack is more general than the Sybil attack.
        Attackers can use a Sybil attack to launch an Eclipse attack by inventing a large
        number of seemingly distinct overlay nodes. However, defenses against Sybil
        attacks do not prevent Eclipse attacks because attackers may manipulate the
        overlay maintenance algorithm to mount an Eclipse attack. This paper discusses
        the impact of the Eclipse attack on several types of overlay and it proposes a
        novel defense that prevents the attack by bounding the degree of overlay nodes.
        Our defense can be applied to any overlay and it enables secure implementations
        of overlay optimizations that choose neighbors according to metrics like
        proximity. We present preliminary results that demonstrate the importance of
        defending against the Eclipse attack and show that our defense is effective},
  www_section = {attack, overlay networks},
  doi = {10.1145/1133572.1133613},
  url = {http://portal.acm.org/citation.cfm?id=1133572.1133613$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.5727.pdf},
}
1143660
@inproceedings{1143660,
  title = {Estimation based erasure-coding routing in delay tolerant networks},
  author = {Liao, Yong and Tan, Kun and Zhang, Zhensheng and Gao, Lixin},
  booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless
        communications and mobile computing},
  organization = {ACM},
  year = {2006},
  address = {New York, NY, USA},
  pages = {557--562},
  publisher = {ACM},
  abstract = {Wireless Delay Tolerant Networks (DTNs) are intermittently connected mobile
        wireless networks. Some well-known assumptions of traditional networks are no
        longer true in DTNs, which makes routing in DTNs a challenging problem. We
        observe that mobile nodes in realistic wireless DTNs may always have some
        mobility pattern information which can be used to estimate one node's ability to
        deliver a specific message. This estimation can greatly enhance the routing
        performance in DTNs. Furthermore, we adopt an alternative way to generate
        redundancy using erasure coding. With a fixed overhead, the erasure coding can
        generate a large number of message-blocks instead of a few replications, and
        therefore it allows the transmission of only a portion of message to a relay.
        This can greatly increase the routing diversity when combined with
        estimation-based approaches. We have conducted extensive simulations to evaluate
        the performance of our scheme. The results demonstrate that our scheme
        outperforms previously proposed schemes},
  www_section = {delay tolerant network},
  isbn = {1-59593-306-9},
  doi = {10.1145/1143549.1143660},
  url = {http://portal.acm.org/citation.cfm?id=1143549.1143660$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.249.pdf},
}
1143821
@inproceedings{1143821,
  title = {A distributed data caching framework for mobile ad hoc networks},
  author = {Wang, Ying-Hong and Chao, Chih-Feng and Lin, Shih-Wei and Chen, Wei-Ting},
  booktitle = {IWCMC '06: Proceedings of the 2006 international conference on Wireless
        communications and mobile computing},
  organization = {ACM},
  year = {2006},
  address = {New York, NY, USA},
  pages = {1357--1362},
  publisher = {ACM},
  abstract = {Mobile ad hoc networks (MANETs), enabling multi-hop communication between
        mobile nodes, are characterized by variable network topology and the demand for
        efficient dynamic routing protocols. MANETs need no stationary infrastructure or
        preconstructed base station to coordinate packet transmissions or to advertise
        information of network topology for mobile nodes. The objective of this paper is
        to provide MANETs with a distributed data caching framework, which could cache
        the repetition of data and data path, shorten routes and time span to access
        data, and enhance data reusable rate to further reduce the use of bandwidth and
        the consumption of power},
  www_section = {mobile Ad-hoc networks},
  isbn = {1-59593-306-9},
  doi = {10.1145/1143549.1143821},
  url = {http://portal.acm.org/citation.cfm?id=1143821$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.426.pdf},
}
1148681
@article{1148681,
  title = {Raptor codes},
  author = {M. Amin Shokrollahi},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {14},
  number = {SI},
  year = {2006},
  address = {Piscataway, NJ, USA},
  pages = {2551--2567},
  publisher = {IEEE Press},
  abstract = {LT-codes are a new class of codes introduced by Luby for the purpose of
        scalable and fault-tolerant distribution of data over computer networks. In this
        paper, we introduce Raptor codes, an extension of LT-codes with linear time
        encoding and decoding. We will exhibit a class of universal Raptor codes: for a
        given integer k and any real {\epsilon} > 0, Raptor codes in this class produce a
        potentially infinite stream of symbols such that any subset of symbols of size
        k(1 + {\epsilon}) is sufficient to recover the original k symbols with high
        probability. Each output symbol is generated using O(log(1/ {\epsilon}))
        operations, and the original symbols are recovered from the collected ones with
        O(k log(1/{\epsilon})) operations.We will also introduce novel techniques for the
        analysis of the error probability of the decoder for finite length Raptor codes.
        Moreover, we will introduce and analyze systematic versions of Raptor codes,
        i.e., versions in which the first output elements of the coding system coincide
        with the original k elements},
  www_section = {802.11, encoding, erasure coding},
  issn = {1063-6692},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raptor.pdf},
}
1151692
@article{1151692,
  title = {Energy-aware lossless data compression},
  author = {Kenneth Barr and Asanovi{\'c}, Krste},
  journal = {ACM Transactions on Computer Systems},
  volume = {24},
  number = {3},
  year = {2006},
  month = jan,
  address = {New York, NY, USA},
  pages = {250--291},
  publisher = {ACM},
  abstract = {Wireless transmission of a single bit can require over 1000 times more energy
        than a single computation. It can therefore be beneficial to perform additional
        computation to reduce the number of bits transmitted. If the energy required to
        compress data is less than the energy required to send it, there is a net energy
        savings and an increase in battery life for portable computers. This article
        presents a study of the energy savings possible by losslessly compressing data
        prior to transmission. A variety of algorithms were measured on a StrongARM
        SA-110 processor. This work demonstrates that, with several typical compression
        algorithms, there is a actually a net energy increase when compression is applied
        before transmission. Reasons for this increase are explained and suggestions are
        made to avoid it. One such energy-aware suggestion is asymmetric compression, the
        use of one compression algorithm on the transmit side and a different algorithm
        for the receive path. By choosing the lowest-energy compressor and decompressor
        on the test platform, overall energy to send and receive data can be reduced by
        11\% compared with a well-chosen symmetric pair, or up to 57\% over the default
        symmetric zlib scheme},
  www_section = {compression, energy-aware, lossless},
  issn = {0734-2071},
  doi = {10.1145/1151690.1151692},
  url = {http://portal.acm.org/citation.cfm?id=1151692$\#$},
}
1157518
@inproceedings{1157518,
  title = {iDIBS: An Improved Distributed Backup System},
  author = {Morcos, Faruck and Chantem, Thidapat and Little, Philip and Gasiba, Tiago and
        Thain, Douglas},
  booktitle = {ICPADS '06: Proceedings of the 12th International Conference on Parallel and
        Distributed Systems},
  organization = {IEEE Computer Society},
  year = {2006},
  address = {Washington, DC, USA},
  pages = {58--67},
  publisher = {IEEE Computer Society},
  abstract = {iDIBS is a peer-to-peer backup system which optimizes the Distributed
        Internet Backup System (DIBS). iDIBS offers increased reliability by enhancing
        the robustness of existing packet transmission mechanism. Reed-Solomon erasure
        codes are replaced with Luby Transform codes to improve computation speed and
        scalability of large files. Lists of peers are automatically stored onto nodes to
        reduce recovery time. To realize these optimizations, an acceptable amount of
        data overhead and an increase in network utilization are imposed on the iDIBS
        system. Through a variety of experiments, we demonstrate that iDIBS significantly
        outperforms DIBS in the areas of data computational complexity, backup
        reliability, and overall performance},
  www_section = {backup, P2P, reliability},
  isbn = {0-7695-2612-8},
  doi = {10.1109/ICPADS.2006.52},
  url = {http://portal.acm.org/citation.cfm?id=1156431.1157518$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.94.4826.pdf},
}
1158641
@inproceedings{1158641,
  title = {Security Considerations in Space and Delay Tolerant Networks},
  author = {Farrell, Stephen and Cahill, Vinny},
  booktitle = {SMC-IT '06: Proceedings of the 2nd IEEE International Conference on Space
        Mission Challenges for Information Technology},
  organization = {IEEE Computer Society},
  year = {2006},
  address = {Washington, DC, USA},
  pages = {29--38},
  publisher = {IEEE Computer Society},
  abstract = {This paper reviews the Internet-inspired security work on delay tolerant
        networking, in particular, as it might apply to space missions, and identifies
        some challenges arising, for both the Internet security community and for space
        missions. These challenges include the development of key management schemes
        suited for space missions as well as a characterization of the actual security
        requirements applying. A specific goal of this paper is therefore to elicit
        feedback from space mission IT specialists in order to guide the development of
        security mechanisms for delay tolerant networking},
  isbn = {0-7695-2644-6},
  doi = {10.1109/SMC-IT.2006.66},
  url = {http://portal.acm.org/citation.cfm?id=1158336.1158641$\#$},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1159937
@article{1159937,
  title     = {Building an AS-topology model that captures route diversity},
  author    = {M{\"u}hlbauer, Wolfgang and Feldmann, Anja and Maennel, Olaf and Roughan, Matthew and Uhlig, Steve},
  journal   = {SIGCOMM Comput. Commun. Rev},
  volume    = {36},
  number    = {4},
  year      = {2006},
  address   = {New York, NY, USA},
  pages     = {195--206},
  publisher = {ACM},
  abstract  = {An understanding of the topological structure of the Internet is needed for
               quite a number of networking tasks, e. g., making decisions about peering
               relationships, choice of upstream providers, inter-domain traffic engineering.
               One essential component of these tasks is the ability to predict routes in the
               Internet. However, the Internet is composed of a large number of independent
               autonomous systems (ASes) resulting in complex interactions, and until now no
               model of the Internet has succeeded in producing predictions of acceptable
               accuracy.We demonstrate that there are two limitations of prior models: (i) they
               have all assumed that an Autonomous System (AS) is an atomic structure--it is
               not, and (ii) models have tended to oversimplify the relationships between ASes.
               Our approach uses multiple quasi-routers to capture route diversity within the
               ASes, and is deliberately agnostic regarding the types of relationships between
               ASes. The resulting model ensures that its routing is consistent with the
               observed routes. Exploiting a large number of observation points, we show that
               our model provides accurate predictions for unobserved routes, a first step
               towards developing structural mod-els of the Internet that enable real
               applications},
  www_section = {border gateway protocol, inter-domain routing, route diversity, routing},
  issn      = {0146-4833},
  doi       = {10.1145/1151659.1159937},
  url       = {http://portal.acm.org/citation.cfm?id=1159937$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BuildingAnASTopologyModel.pdf},
}
1161264
@inproceedings{1161264,
  title = {Performance evaluation of chord in mobile ad hoc networks}, 
  author = {Cramer, Curt and Fuhrmann, Thomas}, 
  booktitle = {MobiShare '06: Proceedings of the 1st international workshop on
        Decentralized resource sharing in mobile computing and networking}, 
  organization = {ACM}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {48--53}, 
  publisher = {ACM}, 
  abstract = {Mobile peer-to-peer applications recently have received growing interest.
        However, it is often assumed that structured peer-to-peer overlays cannot
        efficiently operate in mobile ad hoc networks (MANETs). The prevailing opinion is
        that this is due to the protocols' high overhead cost. In this paper, we show
        that this opinion is misguided.We present a thorough simulation study evaluating
        Chord in the well-known MANET simulator GloMoSim. We found the main issue of
        deploying Chord in a MANET not to be its overhead, but rather the protocol's
        pessimistic timeout and failover strategy. This strategy enables fast lookup
        resolution in spite of highly dynamic node membership, which is a significant
        problem in the Internet context. However, with the inherently higher packet loss
        rate in a MANET, this failover strategy results in lookups being inconsistently
        forwarded even if node membership does not change}, 
  www_section = {Chord, mobile Ad-hoc networks}, 
  isbn = {1-59593-558-4}, 
  doi = {10.1145/1161252.1161264}, 
  url = {http://portal.acm.org/citation.cfm?id=1161264$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p48-cramer_ACM2006.pdf},
}
1170307
@inproceedings{1170307,
  title = {Storage Tradeoffs in a Collaborative Backup Service for Mobile Devices}, 
  author = {Court{\`e}s, Ludovic and Killijian, Marc-Olivier and Powell, David}, 
  booktitle = {EDCC '06: Proceedings of the Sixth European Dependable Computing
        Conference}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  address = {Washington, DC, USA}, 
  pages = {129--138}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Mobile devices are increasingly relied on but are used in contexts that put
        them at risk of physical dam- age, loss or theft. We consider a fault-tolerance
        ap- proach that exploits spontaneous interactions to imple- ment a collaborative
        backup service. We define the con- straints implied by the mobile
        environment,analyze how they translate into the storage layer of such a backup
        system and examine various design options. The paper concludes with a
        presentation of our prototype imple- mentation of the storage layer, an
        evaluation of the im- pact of several compression methods,and directions for
        future work}, 
  isbn = {0-7695-2648-9}, 
  doi = {10.1109/EDCC.2006.26}, 
  url = {http://portal.acm.org/citation.cfm?id=1170307$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/slides.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1217937
@article{1217937,
  title = {Fireflies: scalable support for intrusion-tolerant network overlays}, 
  author = {Johansen, H{\r a}vard and Allavena, Andr{\'e} and Van Renesse, Robbert}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {40}, 
  number = {4}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {3--13}, 
  publisher = {ACM}, 
  abstract = {This paper describes and evaluates Fireflies, a scalable protocol for
        supporting intrusion-tolerant network overlays. While such a protocol cannot
        distinguish Byzantine nodes from correct nodes in general, Fireflies provides
        correct nodes with a reasonably current view of which nodes are live, as well as
        a pseudo-random mesh for communication. The amount of data sent by correct nodes
        grows linearly with the aggregate rate of failures and recoveries, even if
        provoked by Byzantine nodes. The set of correct nodes form a connected submesh;
        correct nodes cannot be eclipsed by Byzantine nodes. Fireflies is deployed and
        evaluated on PlanetLab}, 
  issn = {0163-5980}, 
  doi = {10.1145/1218063.1217937}, 
  url = {http://portal.acm.org/citation.cfm?id=1218063.1217937$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fireflies.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1217950
@article{1217950,
  title = {Experiences in building and operating ePOST, a reliable peer-to-peer
        application}, 
  author = {Mislove, Alan and Post, Ansley and Haeberlen, Andreas and Druschel, Peter}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {40}, 
  number = {4}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {147--159}, 
  publisher = {ACM}, 
  abstract = {Peer-to-peer (p2p) technology can potentially be used to build highly
        reliable applications without a single point of failure. However, most of the
        existing applications, such as file sharing or web caching, have only moderate
        reliability demands. Without a challenging proving ground, it remains unclear
        whether the full potential of p2p systems can be realized.To provide such a
        proving ground, we have designed, deployed and operated a p2p-based email system.
        We chose email because users depend on it for their daily work and therefore
        place high demands on the availability and reliability of the service, as well as
        the durability, integrity, authenticity and privacy of their email. Our system,
        ePOST, has been actively used by a small group of participants for over two
        years.In this paper, we report the problems and pitfalls we encountered in this
        process. We were able to address some of them by applying known principles of
        system design, while others turned out to be novel and fundamental, requiring us
        to devise new solutions. Our findings can be used to guide the design of future
        reliable p2p systems and provide interesting new directions for future research}, 
  www_section = {P2P}, 
  issn = {0163-5980}, 
  doi = {10.1145/1218063.1217950}, 
  url = {http://portal.acm.org/citation.cfm?id=1218063.1217950$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/epost-eurosys2006.pdf}, 
}
1247343
@inproceedings{1247343,
  title = {A cooperative internet backup scheme}, 
  author = {Lillibridge, Mark and Elnikety, Sameh and Birrell, Andrew D. and Burrows,
        Mike and Isard, Michael}, 
  booktitle = {ATEC '03: Proceedings of the annual conference on USENIX Annual Technical
        Conference}, 
  organization = {USENIX Association}, 
  year = {2003}, 
  address = {Berkeley, CA, USA}, 
  pages = {3--3}, 
  publisher = {USENIX Association}, 
  abstract = {We present a novel peer-to-peer backup technique that allows computers
        connected to the Internet to back up their data cooperatively: Each computer has
        a set of partner computers, which collectively hold its backup data. In return,
        it holds a part of each partner's backup data. By adding redundancy and
        distributing the backup data across many partners, a highly-reliable backup can
        be obtained in spite of the low reliability of the average Internet machine.
        Because our scheme requires cooperation, it is potentially vulnerable to several
        novel attacks involving free riding (e.g., holding a partner's data is costly,
        which tempts cheating) or disruption. We defend against these attacks using a
        number of new methods, including the use of periodic random challenges to ensure
        partners continue to hold data and the use of disk-space wasting to make cheating
        unprofitable. Results from an initial prototype show that our technique is
        feasible and very inexpensive: it appears to be one to two orders of magnitude
        cheaper than existing Internet backup services}, 
  www_section = {backup, P2P, redundancy}, 
  url = {http://portal.acm.org/citation.cfm?id=1247343$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lillibridge.pdf}, 
}
1247420
@inproceedings{1247420,
  title = {Redundancy elimination within large collections of files}, 
  author = {Kulkarni, Purushottam and Douglis, Fred and Lavoie, Jason and Tracey, John M.}, 
  booktitle = {ATEC '04: Proceedings of the annual conference on USENIX Annual Technical
        Conference}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  address = {Berkeley, CA, USA}, 
  pages = {5--5}, 
  publisher = {USENIX Association}, 
  abstract = {Ongoing advancements in technology lead to ever-increasing storage
        capacities. In spite of this, optimizing storage usage can still provide rich
        dividends. Several techniques based on delta-encoding and duplicate block
        suppression have been shown to reduce storage overheads, with varying
        requirements for resources such as computation and memory. We propose a new
        scheme for storage reduction that reduces data sizes with an effectiveness
        comparable to the more expensive techniques, but at a cost comparable to the
        faster but less effective ones. The scheme, called Redundancy Elimination at the
        Block Level (REBL), leverages the benefits of compression, duplicate block
        suppression, and delta-encoding to eliminate a broad spectrum of redundant data
        in a scalable and efficient manner. REBL generally encodes more compactly than
        compression (up to a factor of 14) and a combination of compression and duplicate
        suppression (up to a factor of 6.7). REBL also encodes similarly to a technique
        based on delta-encoding, reducing overall space significantly in one case.
        Furthermore, REBL uses super-fingerprints, a technique that reduces the data
        needed to identify similar blocks while dramatically reducing the computational
        requirements of matching the blocks: it turns O(n2) comparisons into hash table
        lookups. As a result, using super-fingerprints to avoid enumerating matching data
        objects decreases computation in the resemblance detection phase of REBL by up to
        a couple orders of magnitude}, 
  url = {http://portal.acm.org/citation.cfm?id=1247420$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8331.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1250746
@article{1250746,
  title     = {Valgrind: a framework for heavyweight dynamic binary instrumentation},
  author    = {Nethercote, Nicholas and Seward, Julian},
  journal   = {SIGPLAN Not},
  volume    = {42},
  number    = {6},
  year      = {2007},
  address   = {New York, NY, USA},
  pages     = {89--100},
  publisher = {ACM},
  abstract  = {Dynamic binary instrumentation (DBI) frameworks make it easy to build dynamic
               binary analysis (DBA) tools such as checkers and profilers. Much of the focus on
               DBI frameworks has been on performance; little attention has been paid to their
               capabilities. As a result, we believe the potential of DBI has not been fully
               exploited. In this paper we describe Valgrind, a DBI framework designed for
               building heavyweight DBA tools. We focus on its unique support for shadow
               values-a powerful but previously little-studied and difficult-to-implement DBA
               technique, which requires a tool to shadow every register and memory value with
               another value that describes it. This support accounts for several crucial design
               features that distinguish Valgrind from other DBI frameworks. Because of these
               features, lightweight tools built with Valgrind run comparatively slowly, but
               Valgrind can be used to build more interesting, heavyweight tools that are
               difficult or impossible to build with other DBI frameworks such as Pin and
               DynamoRIO},
  www_section = {dynamic binary instrumentation},
  issn      = {0362-1340},
  doi       = {10.1145/1273442.1250746},
  url       = {http://portal.acm.org/citation.cfm?id=1250746},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.4263.pdf},
}
1251057
@inproceedings{1251057,
  title = {An analysis of compare-by-hash}, 
  author = {Henson, Val}, 
  booktitle = {HOTOS'03: Proceedings of the 9th conference on Hot Topics in Operating
        Systems}, 
  organization = {USENIX Association}, 
  year = {2003}, 
  address = {Berkeley, CA, USA}, 
  pages = {3--3}, 
  publisher = {USENIX Association}, 
  abstract = {Recent research has produced a new and perhaps dangerous technique for
        uniquely identifying blocks that I will call compare-by-hash. Using this
        technique, we decide whether two blocks are identical to each other by comparing
        their hash values, using a collision-resistant hash such as SHA-1[5]. If the hash
        values match, we assume the blocks are identical without further ado. Users of
        compare-by-hash argue that this assumption is warranted because the chance of a
        hash collision between any two randomly generated blocks is estimated to be many
        orders of magnitude smaller than the chance of many kinds of hardware errors.
        Further analysis shows that this approach is not as risk-free as it seems at
        first glance}, 
  url = {http://portal.acm.org/citation.cfm?id=1251057$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.100.8338.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1251194
@inproceedings{1251194,
  title = {Operating system support for planetary-scale network services}, 
  author = {Bavier, Andy and Bowman, Mic and Chun, Brent and Culler, David and Karlin,
        Scott and Muir, Steve and Peterson, Larry and Roscoe, Timothy and Spalink, Tammo
        and Wawrzoniak, Mike}, 
  booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems
        Design and Implementation}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  address = {Berkeley, CA, USA}, 
  pages = {19--19}, 
  publisher = {USENIX Association}, 
  abstract = {PlanetLab is a geographically distributed overlay network designed to support
        the deployment and evaluation of planetary-scale network services. Two high-level
        goals shape its design. First, to enable a large research community to share the
        infrastructure, PlanetLab provides distributed virtualization, whereby each
        service runs in an isolated slice of PlanetLab's global resources. Second, to
        support competition among multiple network services, PlanetLab decouples the
        operating system running on each node from the network-wide services that define
        PlanetLab, a principle referred to as unbundled management. This paper describes
        how Planet-Lab realizes the goals of distributed virtualization and unbundled
        management, with a focus on the OS running on each node}, 
  www_section = {overlay networks}, 
  url = {http://portal.acm.org/citation.cfm?id=1251175.1251194$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/plos_nsdi_04.pdf}, 
}
1251195
@inproceedings{1251195,
  title = {MACEDON: methodology for automatically creating, evaluating, and designing
        overlay networks}, 
  author = {Rodriguez, Adolfo and Killian, Charles and Bhat, Sooraj and Kosti{\'c}, Dejan
        and Vahdat, Amin}, 
  booktitle = {NSDI'04: Proceedings of the 1st conference on Symposium on Networked Systems
        Design and Implementation}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  address = {Berkeley, CA, USA}, 
  pages = {20--20}, 
  publisher = {USENIX Association}, 
  abstract = {Currently, researchers designing and implementing large-scale overlay
        services employ disparate techniques at each stage in the production cycle:
        design, implementation, experimentation, and evaluation. As a result, complex and
        tedious tasks are often duplicated leading to ineffective resource use and
        difficulty in fairly comparing competing algorithms. In this paper, we present
        MACEDON, an infrastructure that provides facilities to: i) specify distributed
        algorithms in a concise domain-specific language; ii) generate code that executes
        in popular evaluation infrastructures and in live networks; iii) leverage an
        overlay-generic API to simplify the interoperability of algorithm implementations
        and applications; and iv) enable consistent experimental evaluation. We have used
        MACEDON to implement and evaluate a number of algorithms, including AMMO, Bullet,
        Chord, NICE, Overcast, Pastry, Scribe, and SplitStream, typically with only a few
        hundred lines of MACEDON code. Using our infrastructure, we are able to
        accurately reproduce or exceed published results and behavior demonstrated by
        current publicly available implementations}, 
  url = {http://portal.acm.org/citation.cfm?id=1251175.1251195$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.2.8796.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1251207
@inproceedings{1251207,
  title = {Detecting BGP configuration faults with static analysis}, 
  author = {Feamster, Nick and Balakrishnan, Hari}, 
  booktitle = {NSDI'05: Proceedings of the 2nd conference on Symposium on Networked Systems
        Design \& Implementation}, 
  organization = {USENIX Association}, 
  year = {2005}, 
  address = {Berkeley, CA, USA}, 
  pages = {43--56}, 
  publisher = {USENIX Association}, 
  abstract = {The Internet is composed of many independent autonomous systems (ASes) that
        exchange reachability information to destinations using the Border Gateway
        Protocol (BGP). Network operators in each AS configure BGP routers to control the
        routes that are learned, selected, and announced to other routers. Faults in BGP
        configuration can cause forwarding loops, packet loss, and unintended paths
        between hosts, each of which constitutes a failure of the Internet routing
        infrastructure. This paper describes the design and implementation of rcc, the
        router configuration checker, a tool that finds faults in BGP configurations
        using static analysis. rcc detects faults by checking constraints that are based
        on a high-level correctness specification. rcc detects two broad classes of
        faults: route validity faults, where routers may learn routes that do not
        correspond to usable paths, and path visibility faults, where routers may fail to
        learn routes for paths that exist in the network. rcc enables network operators
        to test and debug configurations before deploying them in an operational network,
        improving on the status quo where most faults are detected only during operation.
        rcc has been downloaded by more than sixty-five network operators to date, some
        of whom have shared their configurations with us. We analyze network-wide
        configurations from 17 different ASes to detect a wide variety of faults and use
        these findings to motivate improvements to the Internet routing infrastructure}, 
  www_section = {autonomous systems, border gateway protocol}, 
  url = {http://portal.acm.org/citation.cfm?id=1251207$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.113.5668.pdf}, 
}
1251279
@inproceedings{1251279,
  title = {Energy-efficiency and storage flexibility in the blue file system}, 
  author = {Nightingale, Edmund B. and Flinn, Jason}, 
  booktitle = {OSDI'04: Proceedings of the 6th conference on Symposium on Opearting Systems
        Design \& Implementation}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  address = {Berkeley, CA, USA}, 
  pages = {25--25}, 
  publisher = {USENIX Association}, 
  abstract = {A fundamental vision driving pervasive computing research is access to
        personal and shared data anywhere at anytime. In many ways, this vision is close
        to being realized. Wireless networks such as 802.11 offer connectivity to small,
        mobile devices. Portable storage, such as mobile disks and USB keychains, let
        users carry several gigabytes of data in their pockets. Yet, at least three
        substantial barriers to pervasive data access remain. First, power-hungry network
        and storage devices tax the limited battery capacity of mobile computers. Second,
        the danger of viewing stale data or making inconsistent updates grows as objects
        are replicated across more computers and portable storage devices. Third, mobile
        data access performance can suffer due to variable storage access times caused by
        dynamic power management, mobility, and use of heterogeneous storage devices. To
        overcome these barriers, we have built a new distributed file system called
        BlueFS. Compared to the Coda file system, BlueFS reduces file system energy usage
        by up to 55\% and provides up to 3 times faster access to data replicated on
        portable storage}, 
  www_section = {802.11, file systems}, 
  url = {http://portal.acm.org/citation.cfm?id=1251279$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nightingale-bluefs2004.pdf},
}
1251470
@inproceedings{1251470,
  title = {Symphony: distributed hashing in a small world}, 
  author = {Manku, Gurmeet Singh and Bawa, Mayank and Raghavan, Prabhakar}, 
  booktitle = {USITS'03: Proceedings of the 4th conference on USENIX Symposium on Internet
        Technologies and Systems}, 
  organization = {USENIX Association}, 
  year = {2003}, 
  address = {Berkeley, CA, USA}, 
  pages = {10--10}, 
  publisher = {USENIX Association}, 
  abstract = {We present Symphony, a novel protocol for maintaining distributed hash tables
        in a wide area network. The key idea is to arrange all participants along a ring
        and equip them with long distance contacts drawn from a family of harmonic
        distributions. Through simulation, we demonstrate that our construction is
        scalable, flexible, stable in the presence of frequent updates and offers small
        average latency with only a handful of long distance links per node. The cost of
        updates when hosts join and leave is small}, 
  www_section = {small-world}, 
  url = {http://portal.acm.org/citation.cfm?id=1251460.1251470$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/manku03symphony.pdf}, 
}
1251532
@inproceedings{1251532,
  title = {Non-transitive connectivity and DHTs}, 
  author = {Freedman, Michael J. and Lakshminarayanan, Karthik and Rhea, Sean C. and
        Stoica, Ion}, 
  booktitle = {WORLDS'05: Proceedings of the 2nd conference on Real, Large Distributed
        Systems}, 
  organization = {USENIX Association}, 
  year = {2005}, 
  address = {Berkeley, CA, USA}, 
  pages = {55--60}, 
  publisher = {USENIX Association}, 
  abstract = {The most basic functionality of a distributed hash table, or DHT, is to
        partition a key space across the set of nodes in a distributed system such that
        all nodes agree on the partitioning. For example, the Chord DHT assigns each
        node}, 
  www_section = {Chord, distributed hash table}, 
  url = {http://portal.acm.org/citation.cfm?id=1251532$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ntr-worlds05.pdf}, 
}
1267093
@inproceedings{1267093,
  title = {File system design for an NFS file server appliance}, 
  author = {Hitz, Dave and Lau, James and Malcolm, Michael}, 
  booktitle = {WTEC'94: Proceedings of the USENIX Winter 1994 Technical Conference on
        USENIX Winter 1994 Technical Conference}, 
  organization = {USENIX Association}, 
  year = {1994}, 
  address = {Berkeley, CA, USA}, 
  pages = {19--19}, 
  publisher = {USENIX Association}, 
  abstract = {Network Appliance Corporation recently began shipping a new kind of network
        server called an NFS file server appliance, which is a dedicated server whose
        sole function is to provide NFS file service. The file system requirements for an
        NFS appliance are different from those for a general-purpose UNIX system, both
        because an NFS appliance must be optimized for network file access and because an
        appliance must be easy to use. This paper describes WAFL (Write Anywhere File
        Layout), which is a file system designed specifically to work in an NFS
        appliance. The primary focus is on the algorithms and data structures that WAFL
        uses to implement Snapshotst, which are read-only clones of the active file
        system. WAFL uses a copy-on-write technique to minimize the disk space that
        Snapshots consume. This paper also describes how WAFL uses Snapshots to eliminate
        the need for file system consistency checking after an unclean shutdown}, 
  url = {http://portal.acm.org/citation.cfm?id=1267093$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.3691.pdf}, 
  www_section = {Unsorted}, 
}
1267366
@inproceedings{1267366,
  title = {Compare-by-hash: a reasoned analysis}, 
  author = {Black, John}, 
  booktitle = {ATEC '06: Proceedings of the annual conference on USENIX '06 Annual
        Technical Conference}, 
  organization = {USENIX Association}, 
  year = {2006}, 
  address = {Berkeley, CA, USA}, 
  pages = {7--7}, 
  publisher = {USENIX Association}, 
  abstract = {Compare-by-hash is the now-common practice used by systems designers who
        assume that when the digest of a cryptographic hash function is equal on two
        distinct files, then those files are identical. This approach has been used in
        both real projects and in research efforts (for example rysnc [16] and LBFS
        [12]). A recent paper by Henson criticized this practice [8]. The present paper
        revisits the topic from an advocate's standpoint: we claim that compare-by-hash
        is completely reasonable, and we offer various arguments in support of this
        viewpoint in addition to addressing concerns raised by Henson}, 
  url = {http://portal.acm.org/citation.cfm?id=1267366$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.125.4474.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1267576
@inproceedings{1267576,
  title = {Establishing identity without certification authorities}, 
  author = {Ellison, Carl M.}, 
  booktitle = {SSYM'96: Proceedings of the 6th conference on USENIX Security Symposium,
        Focusing on Applications of Cryptography}, 
  organization = {USENIX Association}, 
  year = {1996}, 
  address = {Berkeley, CA, USA}, 
  pages = {7--7}, 
  publisher = {USENIX Association}, 
  abstract = {this paper is that a traditional identity certificate is neither necessary
        nor sufficient for this purpose. It is especially useless if the two parties
        concerned did not have the foresight to obtain such certificates before desiring
        to open a secure channel. There are many methods for establishing identity
        without using certificates from trusted certification authorities. The
        relationship between verifier and subject guides the choice of method. Many of
        these relationships have easy, straight-forward methods for binding a public key
        to an identity, using a broadcast channel or 1:1 meetings, but one relationship
        makes it especially difficult. That relationship is one with an old friend with
        whom you had lost touch but who appears now to be available on the net. You make
        contact and share a few exchanges which suggest to you that this is, indeed, your
        old friend. Then you want to form a secure channel in order to carry on a more
        extensive conversation in private. This case is subject to the man-in-themiddle
        attack. For this case, a protocol is presented which binds a pair of identities
        to a pair of public keys without using any certificates issued by a trusted CA.
        The apparent direct conflict between conventional wisdom and the thesis of this
        paper lies in the definition of the word "identity" -- a word which is commonly
        left undefined in discussions of certification}, 
  www_section = {certificate revocation, public key cryptography}, 
  url = {http://portal.acm.org/citation.cfm?id=1267576$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.31.7263.pdf}, 
}
1268712
@inproceedings{1268712,
  title = {Operation-based update propagation in a mobile file system}, 
  author = {Lee, Yui-Wah and Leung, Kwong-Sak and Satyanarayanan, Mahadev}, 
  booktitle = {ATEC '99: Proceedings of the annual conference on USENIX Annual Technical
        Conference}, 
  organization = {USENIX Association}, 
  year = {1999}, 
  address = {Berkeley, CA, USA}, 
  pages = {4--4}, 
  publisher = {USENIX Association}, 
  abstract = {In this paper we describe a technique called operation-based update
        propagation for efficiently transmitting updates to large files that have been
        modified on a weakly connected client of a distributed file system. In this
        technique, modifications are captured above the file-system layer at the client,
        shipped to a surrogate client that is strongly connected to a server, re-executed
        at the surrogate, and the resulting files transmitted from the surrogate to the
        server. If re-execution fails to produce a file identical to the original, the
        system falls back to shipping the file from the client over the slow network. We
        have implemented a prototype of this mechanism in the Coda File System on Linux,
        and demonstrated performance improvements ranging from 40 percents to nearly
        three orders of magnitude in reduced network traffic and elapsed time. We also
        found a novel use of forward error correction in this context}, 
  url = {http://portal.acm.org/citation.cfm?id=1268712$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lee.pdf}, 
}
%%%%% ERROR: Missing field (moved outside the entry: '%' is not a comment inside BibTeX entries)
% www_section = {?????},
1270971
@inproceedings{1270971,
  title = {Towards Fair Event Dissemination},
  author = {Baehni, Sebastien and Guerraoui, Rachid and Koldehofe, Boris and Monod, Maxime},
  booktitle = {ICDCSW '07: Proceedings of the 27th International Conference on Distributed
        Computing Systems Workshops},
  organization = {IEEE Computer Society},
  year = {2007},
  address = {Washington, DC, USA},
  pages = {0--63},
  publisher = {IEEE Computer Society},
  abstract = {Event dissemination in large scale dynamic systems is typically claimed to be
        best achieved using decentralized peer-to-peer architectures. The rationale is to
        have every participant in the system act both as a client (information consumer)
        and as a server (information dissemination enabler), thus, precluding specific
        brokers which would prevent scalability and fault-tolerance. We argue that, for
        such decentralized architectures to be really meaningful, participants should
        serve the system as much as they benefit from it. That is, the system should be
        fair in the sense that the extend to which a participant acts as a server should
        depend on the extend to which it has the opportunity to act as a client. This is
        particularly crucial in selective information dissemination schemes where clients
        are not all interested in the same information. In this position paper, we
        discuss what a notion of fairness could look like, explain why current
        architectures are not fair, and raise several challenges towards achieving
        fairness},
  isbn = {0-7695-2838-4},
  doi = {10.1109/ICDCSW.2007.83},
  url = {http://portal.acm.org/citation.cfm?id=1270388.1270971},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.9758.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1273222
@article{1273222,
  title = {Privacy protection in personalized search},
  author = {Shen, Xuehua and Tan, Bin and Zhai, ChengXiang},
  journal = {SIGIR Forum},
  volume = {41},
  number = {1},
  year = {2007},
  address = {New York, NY, USA},
  pages = {4--17},
  publisher = {ACM},
  abstract = {Personalized search is a promising way to improve the accuracy of web search,
        and has been attracting much attention recently. However, effective personalized
        search requires collecting and aggregating user information, which often raise
        serious concerns of privacy infringement for many users. Indeed, these concerns
        have become one of the main barriers for deploying personalized search
        applications, and how to do privacy-preserving personalization is a great
        challenge. In this paper, we systematically examine the issue of privacy
        preservation in personalized search. We distinguish and define four levels of
        privacy protection, and analyze various software architectures for personalized
        search. We show that client-side personalization has advantages over the existing
        server-side personalized search services in preserving privacy, and envision
        possible future strategies to fully protect user privacy},
  www_section = {privacy, search},
  issn = {0163-5840},
  doi = {10.1145/1273221.1273222},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2007j_sigirforum_shen.pdf},
}
1273450
@article{1273450,
  title = {On compact routing for the internet},
  author = {Krioukov, Dmitri and Fall, Kevin and Brady, Arthur},
  journal = {SIGCOMM Comput. Commun. Rev.},
  volume = {37},
  number = {3},
  year = {2007},
  address = {New York, NY, USA},
  pages = {41--52},
  publisher = {ACM},
  abstract = {The Internet's routing system is facing stresses due to its poor fundamental
        scaling properties. Compact routing is a research field that studies fundamental
        limits of routing scalability and designs algorithms that try to meet these
        limits. In particular, compact routing research shows that shortest-path routing,
        forming a core of traditional routing algorithms, cannot guarantee routing table
        (RT) sizes that on all network topologies grow slower than linearly as functions
        of the network size. However, there are plenty of compact routing schemes that
        relax the shortest-path requirement and allow for improved, sublinear RT size
        scaling that is mathematically provable for all static network topologies. In
        particular, there exist compact routing schemes designed for grids, trees, and
        Internet-like topologies that offer RT sizes that scale logarithmically with the
        network size. In this paper, we demonstrate that in view of recent results in
        compact routing research, such logarithmic scaling on Internet-like topologies is
        fundamentally impossible in the presence of topology dynamics or
        topology-independent (flat) addressing. We use analytic arguments to show that
        the number of routing control messages per topology change cannot scale better
        than linearly on Internet-like topologies. We also employ simulations to confirm
        that logarithmic RT size scaling gets broken by topology-independent addressing,
        a cornerstone of popular locator-identifier split proposals aiming at improving
        routing scaling in the presence of network topology dynamics or host mobility.
        These pessimistic findings lead us to the conclusion that a fundamental
        re-examination of assumptions behind routing models and abstractions is needed in
        order to find a routing architecture that would be able to scale "indefinitely"},
  www_section = {compact routing, internet routing, routing scalability},
  issn = {0146-4833},
  doi = {10.1145/1273445.1273450},
  url = {http://portal.acm.org/citation.cfm?id=1273450},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.5763.pdf},
}
1290327
@article{1290327,
  title = {On improving the efficiency of truthful routing in MANETs with selfish nodes},
  author = {Wang, Yongwei and Singhal, Mukesh},
  journal = {Pervasive Mob. Comput.},
  volume = {3},
  number = {5},
  year = {2007},
  address = {Amsterdam, The Netherlands, The Netherlands},
  pages = {537--559},
  publisher = {Elsevier Science Publishers B. V.},
  abstract = {In Mobile Ad Hoc Networks (MANETs), nodes depend upon each other for routing
        and forwarding packets. However, nodes belonging to independent authorities in
        MANETs may behave selfishly and may not forward packets to save battery and other
        resources. To stimulate cooperation, nodes are rewarded for their forwarding
        service. Since nodes spend different cost to forward packets, it is desirable to
        reimburse nodes according to their cost so that nodes get incentive while the
        least total payment is charged to the sender. However, to maximize their utility,
        nodes may tell lie about their cost. This poses the requirement of truthful
        protocols, which maximizes the utility of nodes only when they declare their true
        cost. Anderegg and Eidenbenz recently proposed a truthful routing protocol, named
        ad hoc-VCG. This protocol incurs the route discovery overhead of O(n3), where n
        is the number of nodes in the network. This routing overhead is likely to become
        prohibitively large as the network size grows. Moreover, it leads to low network
        performance due to congestion and interference. We present a low-overhead
        truthful routing protocol for route discovery in MANETs with selfish nodes by
        applying mechanism design. The protocol, named LOTTO (Low Overhead Truthful
        rouTing prOtocol), finds a least cost path for data forwarding with a lower
        routing overhead of O(n2). We conduct an extensive simulation study to evaluate
        the performance of our protocol and compare it with ad hoc-VCG. Simulation
        results show that our protocol provides a much higher packet delivery ratio,
        generates much lower overhead and has much lower end-to-end delay},
  www_section = {mobile Ad-hoc networks, routing, VCG mechanism},
  issn = {1574-1192},
  doi = {10.1016/j.pmcj.2007.02.001},
  url = {http://portal.acm.org/citation.cfm?id=1290327},
}
1326260
@inproceedings{1326260,
  title = {Skype4Games},
  author = {Triebel, Tonio and Guthier, Benjamin and Effelsberg, Wolfgang},
  booktitle = {NetGames '07: Proceedings of the 6th ACM SIGCOMM workshop on Network and
        system support for games},
  organization = {ACM},
  year = {2007},
  address = {New York, NY, USA},
  pages = {13--18},
  publisher = {ACM},
  abstract = {We propose to take advantage of the distributed multi-user Skype system for
        the implementation of an interactive online game. Skype combines efficient
        multi-peer support with the ability to get around firewalls and network address
        translation; in addition, speech is available to all game participants for free.
        We discuss the network requirements of interactive multi-player games, in
        particular concerning end-to-end delay and distributed state maintenance. We then
        introduce the multi-user support available in Skype and conclude that it should
        suffice for a game implementation. We explain how our multi-player game based on
        the Irrlicht graphics engine was implemented over Skype, and we present very
        promising results of an early performance evaluation},
  www_section = {distributed interactive applications, P2P},
  isbn = {978-0-9804460-0-5},
  doi = {10.1145/1326257.1326260},
  url = {http://portal.acm.org/citation.cfm?id=1326260},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Triebel2007a.pdf},
}
1327188
@article{1327188,
  title = {Private Searching on Streaming Data},
  author = {Ostrovsky, Rafail and Skeith, William E.},
  journal = {J. Cryptol.},
  volume = {20},
  number = {4},
  year = {2007},
  address = {Secaucus, NJ, USA},
  pages = {397--430},
  publisher = {Springer-Verlag New York, Inc.},
  abstract = {In this paper we consider the problem of private searching on streaming data,
        where we can efficiently implement searching for documents that satisfy a secret
        criteria (such as the presence or absence of a hidden combination of hidden
        keywords) under various cryptographic assumptions. Our results can be viewed in a
        variety of ways: as a generalization of the notion of private information
        retrieval (to more general queries and to a streaming environment); as positive
        results on privacy-preserving datamining; and as a delegation of hidden program
        computation to other machines},
  www_section = {keywords, privacy, private information retrieval, search, streaming},
  issn = {0933-2790},
  doi = {10.1007/s00145-007-0565-3},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ostrovsky-Skeith.pdf},
}
1329865
@phdthesis{1329865,
  title = {Cheat-proof event ordering for large-scale distributed multiplayer games},
  author = {GauthierDickey, Chris},
  school = {University of Oregon},
  year = {2007},
  address = {Eugene, OR, USA},
  note = {Adviser: Virginia Lo},
  abstract = {Real-time, interactive, multi-user (RIM) applications are networked
        applications that allow users to collaborate and interact with each other over
        the Internet for work, education and training, or entertainment purposes.
        Multiplayer games, distance learning applications, collaborative whiteboards,
        immersive educational and training simulations, and distributed interactive
        simulations are examples of these applications. Of these RIM applications,
        multiplayer games are an important class for research due to their widespread
        deployment and popularity on the Internet. Research with multiplayer games will
        have a direct impact on all RIM applications. While large-scale multiplayer games
        have typically used a client/server architecture for network communication, we
        propose using a peer-to-peer architecture to solve the scalability problems
        inherent in centralized systems. Past research and actual deployments of
        peer-to-peer networks show that they can scale to millions of users. However,
        these prior peer-to-peer networks do not meet the low latency and interactive
        requirements that multi-player games need. Indeed, the fundamental problem of
        maintaining consistency between all nodes in the face of failures, delays, and
        malicious attacks has to be solved to make a peer-to-peer networks a viable
        solution. We propose solving the consistency problem through secure and scalable
        event ordering. While traditional event ordering requires all-to-all message
        passing and at least two rounds of communication, we argue that multiplayer games
        lend themselves naturally to a hierarchical decomposition of their state space so
        that we can reduce the communication cost of event ordering. We also argue that
        by using cryptography, a discrete view of time, and majority voting, we can
        totally order events in a real-time setting. By applying these two concepts, we
        can scale multiplayer games to millions of players. We develop our solution in
        two parts: a cheat-proof and real-time event ordering protocol and a scalable,
        hierarchical structure that organizes peers in a tree according to their scope of
        interest in the game. Our work represents the first, complete solution to this
        problem and we show through both proofs and simulations that our protocols allow
        the creation of large-scale, peer-to-peer games that are resistant to cheating
        while maintaining real-time responsiveness in the system},
  url = {http://portal.acm.org/citation.cfm?id=1329865},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1341892
@article{1341892,
  title = {ODSBR: An on-demand secure Byzantine resilient routing protocol for wireless ad
        hoc networks},
  author = {Awerbuch, Baruch and Curtmola, Reza and Holmer, David and Nita-Rotaru, Cristina
        and Rubens, Herbert},
  journal = {ACM Trans. Inf. Syst. Secur.},
  volume = {10},
  number = {4},
  year = {2008},
  address = {New York, NY, USA},
  pages = {1--35},
  publisher = {ACM},
  abstract = {Ad hoc networks offer increased coverage by using multihop communication.
        This architecture makes services more vulnerable to internal attacks coming from
        compromised nodes that behave arbitrarily to disrupt the network, also referred
        to as Byzantine attacks. In this work, we examine the impact of several Byzantine
        attacks performed by individual or colluding attackers. We propose ODSBR, the
        first on-demand routing protocol for ad hoc wireless networks that provides
        resilience to Byzantine attacks caused by individual or colluding nodes. The
        protocol uses an adaptive probing technique that detects a malicious link after
        log n faults have occurred, where n is the length of the path. Problematic links
        are avoided by using a route discovery mechanism that relies on a new metric that
        captures adversarial behavior. Our protocol never partitions the network and
        bounds the amount of damage caused by attackers. We demonstrate through
        simulations ODSBR's effectiveness in mitigating Byzantine attacks. Our analysis
        of the impact of these attacks versus the adversary's effort gives insights into
        their relative strengths, their interaction, and their importance when designing
        multihop wireless routing protocols},
  www_section = {ad-hoc networks, byzantine fault tolerance, on-demand routing, security
        model},
  issn = {1094-9224},
  doi = {10.1145/1284680.1341892},
  url = {http://portal.acm.org/citation.cfm?id=1284680.1341892},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ODSBR-TISSEC.pdf},
}
1345798
@inproceedings{1345798,
  title = {Dependability Evaluation of Cooperative Backup Strategies for Mobile Devices},
  author = {Court{\`e}s, Ludovic and Hamouda, Ossama and Kaaniche, Mohamed and Killijian,
        Marc-Olivier and Powell, David},
  booktitle = {PRDC '07: Proceedings of the 13th Pacific Rim International Symposium on
        Dependable Computing},
  organization = {IEEE Computer Society},
  year = {2007},
  address = {Washington, DC, USA},
  pages = {139--146},
  publisher = {IEEE Computer Society},
  abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on
        but are used in contexts that put them at risk of physical damage, loss or theft.
        This paper discusses the dependability evaluation of a cooperative backup service
        for mobile devices. Participating devices leverage encounters with other devices
        to temporarily replicate critical data. Permanent backups are created when the
        participating devices are able to access the fixed infrastructure. Several data
        replication and scattering strategies are presented,including the use of erasure
        codes. A methodology to model and evaluate them using Petri nets and Markov
        chains is described. We demonstrate that our cooperative backup service decreases
        the probability of data loss by a factor up to the ad hoc to Internet
        connectivity ratio},
  isbn = {0-7695-3054-0},
  doi = {10.1109/PRDC.2007.29},
  url = {http://portal.acm.org/citation.cfm?id=1345534.1345798},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.8269_0.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1358311
@article{1358311,
  title = {Linyphi: creating IPv6 mesh networks with SSR},
  author = {Di, Pengfei and Eickhold, Johannes and Fuhrmann, Thomas},
  journal = {Concurr. Comput.: Pract. Exper.},
  volume = {20},
  number = {6},
  year = {2008},
  address = {Chichester, UK},
  pages = {675--691},
  publisher = {John Wiley and Sons Ltd},
  abstract = {Scalable source routing (SSR) is a self-organizing routing protocol which is
        especially suited for networks that do not have a well-crafted structure, e.g. ad
        hoc and mesh networks. SSR works on a flat identifier space. As a consequence, it
        can easily support host mobility without requiring any location directory or
        other centralized service. SSR is based on a virtual ring structure, which is
        used in a chord-like manner to obtain source routes to previously unknown
        destinations. It has been shown that SSR requires very little per node state and
        produces very little control messages. In particular, SSR has been found to
        outperform other ad hoc routing protocols such as ad hoc on-demand distance
        vector routing, optimized link-state routing, or beacon vector routing. In this
        paper we present Linyphi, an implementation of SSR for wireless access routers.
        Linyphi combines IPv6 and SSR so that unmodified IPv6 hosts have transparent
        connectivity to both the Linyphi mesh network and the IPv4-v6 Internet. We give a
        basic outline of the implementation and demonstrate its suitability in real-world
        mesh network scenarios. Furthermore, we illustrate the use of Linyphi for
        distributed applications such as the Linyphone peer-to-peer VoIP application.
        Copyright {\textcopyright} 2008 John Wiley \& Sons, Ltd},
  www_section = {scalable source routing},
  issn = {1532-0626},
  doi = {10.1002/cpe.v20:6},
  url = {http://portal.acm.org/citation.cfm?id=1358302.1358311},
}
1361410
@inproceedings{1361410,
  title = {Purely functional system configuration management},
  author = {Dolstra, Eelco and Hemel, Armijn},
  booktitle = {HOTOS'07: Proceedings of the 11th USENIX workshop on Hot topics in operating
        systems},
  organization = {USENIX Association},
  year = {2007},
  address = {Berkeley, CA, USA},
  pages = {1--6},
  publisher = {USENIX Association},
  abstract = {System configuration management is difficult because systems evolve in an
        undisciplined way: packages are upgraded, configuration files are edited, and so
        on. The management of existing operating systems is strongly imperative in
        nature, since software packages and configuration data (e.g., /bin and /etc in
        Unix) can be seen as imperative data structures: they are updated in-place by
        system administration actions. In this paper we present an alternative approach
        to system configuration management: a purely functional method, analogous to
        languages like Haskell. In this approach, the static parts of a configuration --
        software packages, configuration files, control scripts -- are built from pure
        functions, i.e., the results depend solely on the specified inputs of the
        function and are immutable. As a result, realising a system configuration becomes
        deterministic and reproducible. Upgrading to a new configuration is mostly atomic
        and doesn't overwrite anything of the old configuration, thus enabling rollbacks.
        We have implemented the purely functional model in a small but realistic
        Linux-based operating system distribution called NixOS},
  url = {http://portal.acm.org/citation.cfm?id=1361410},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dolstra.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1373458
@article{1373458,
  title = {Efficient routing in intermittently connected mobile networks: the single-copy
        case},
  author = {Spyropoulos, Thrasyvoulos and Psounis, Konstantinos and Raghavendra, Cauligi
        S.},
  journal = {IEEE/ACM Trans. Netw.},
  volume = {16},
  number = {1},
  year = {2008},
  address = {Piscataway, NJ, USA},
  pages = {63--76},
  publisher = {IEEE Press},
  abstract = {Intermittently connected mobile networks are wireless networks where most of
        the time there does not exist a complete path from the source to the destination.
        There are many real networks that follow this model, for example, wildlife
        tracking sensor networks, military networks, vehicular ad hoc networks (VANETs),
        etc. In this context, conventional routing schemes would fail, because they try
        to establish complete end-to-end paths, before any data is sent. To deal with
        such networks researchers have suggested to use flooding-based routing schemes.
        While flooding-based schemes have a high probability of delivery, they waste a
        lot of energy and suffer from severe contention which can significantly degrade
        their performance. With this in mind, we look into a number of "single-copy"
        routing schemes that use only one copy per message, and hence significantly
        reduce the resource requirements of flooding-based algorithms. We perform a
        detailed exploration of the single-copy routing space in order to identify
        efficient single-copy solutions that (i) can be employed when low resource usage
        is critical, and (ii) can help improve the design of general routing schemes that
        use multiple copies. We also propose a theoretical framework that we use to
        analyze the performance of all single-copy schemes presented, and to derive upper
        and lower bounds on the delay of any scheme},
  www_section = {mobile Ad-hoc networks, routing},
  issn = {1063-6692},
  doi = {10.1109/TNET.2007.897962},
  url = {http://portal.acm.org/citation.cfm?id=1373458},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.74.8097.pdf},
}
1373992
@article{1373992,
  title = {Characterizing unstructured overlay topologies in modern P2P file-sharing
        systems},
  author = {Stutzbach, Daniel and Rejaie, Reza and Sen, Subhabrata},
  journal = {IEEE/ACM Trans. Netw.},
  volume = {16},
  number = {2},
  year = {2008},
  address = {Piscataway, NJ, USA},
  pages = {267--280},
  publisher = {IEEE Press},
  abstract = {In recent years, peer-to-peer (P2P) file-sharing systems have evolved to
        accommodate growing numbers of participating peers. In particular, new features
        have changed the properties of the unstructured overlay topologies formed by
        these peers. Little is known about the characteristics of these topologies and
        their dynamics in modern file-sharing applications, despite their importance.
        This paper presents a detailed characterization of P2P overlay topologies and
        their dynamics, focusing on the modern Gnutella network. We present Cruiser, a
        fast and accurate P2P crawler, which can capture a complete snapshot of the
        Gnutella network of more than one million peers in just a few minutes, and show
        how inaccuracy in snapshots can lead to erroneous conclusions--such as a
        power-law degree distribution. Leveraging recent overlay snapshots captured with
        Cruiser, we characterize the graph-related properties of individual overlay
        snapshots and overlay dynamics across slices of back-to-back snapshots. Our
        results reveal that while the Gnutella network has dramatically grown and changed
        in many ways, it still exhibits the clustering and short path lengths of a small
        world network. Furthermore, its overlay topology is highly resilient to random
        peer departure and even systematic attacks. More interestingly, overlay dynamics
        lead to an "onion-like" biased connectivity among peers where each peer is more
        likely connected to peers with higher uptime. Therefore, long-lived peers form a
        stable core that ensures reachability among peers despite overlay dynamics},
  www_section = {file-sharing, P2P},
  issn = {1063-6692},
  doi = {10.1109/TNET.2007.900406},
  url = {http://portal.acm.org/citation.cfm?id=1373992},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/stutzbach.pdf},
}
1387603
@inproceedings{1387603,
  title = {BFT protocols under fire},
  author = {Singh, Atul and Das, Tathagata and Maniatis, Petros and Druschel, Peter and
        Roscoe, Timothy},
  booktitle = {NSDI'08: Proceedings of the 5th USENIX Symposium on Networked Systems Design
        and Implementation},
  organization = {USENIX Association},
  year = {2008},
  address = {Berkeley, CA, USA},
  pages = {189--204},
  publisher = {USENIX Association},
  abstract = {Much recent work on Byzantine state machine replication focuses on protocols
        with improved performance under benign conditions (LANs, homogeneous replicas,
        limited crash faults), with relatively little evaluation under typical, practical
        conditions (WAN delays, packet loss, transient disconnection, shared resources).
        This makes it difficult for system designers to choose the appropriate protocol
        for a real target deployment. Moreover, most protocol implementations differ in
        their choice of runtime environment, crypto library, and transport, hindering
        direct protocol comparisons even under similar conditions. We present a
        simulation environment for such protocols that combines a declarative networking
        system with a robust network simulator. Protocols can be rapidly implemented from
        pseudocode in the high-level declarative language of the former, while network
        conditions and (measured) costs of communication packages and crypto primitives
        can be plugged into the latter. We show that the resulting simulator faithfully
        predicts the performance of native protocol implementations, both as published
        and as measured in our local network. We use the simulator to compare
        representative protocols under identical conditions and rapidly explore the
        effects of changes in the costs of crypto operations, workloads, network
        conditions and faults. For example, we show that Zyzzyva outperforms protocols
        like PBFT and Q/U undermost but not all conditions, indicating that
        one-size-fits-all protocols may be hard if not impossible to design in practice},
  url = {http://portal.acm.org/citation.cfm?id=1387603},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BFTSim-nsdi08.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
% NOTE(review): dropped isbn = {111-999-5555-22-1} -- an ACM placeholder, not a real ISBN; verify before restoring.
1390683
@article{1390683,
  title = {Linear-Time Computation of Similarity Measures for Sequential Data},
  author = {Rieck, Konrad and Laskov, Pavel},
  journal = {J. Mach. Learn. Res.},
  volume = {9},
  year = {2008},
  pages = {23--48},
  publisher = {JMLR.org},
  abstract = {Efficient and expressive comparison of sequences is an essential procedure
        for learning with sequential data. In this article we propose a generic framework
        for computation of similarity measures for sequences, covering various kernel,
        distance and non-metric similarity functions. The basis for comparison is
        embedding of sequences using a formal language, such as a set of natural words,
        k-grams or all contiguous subsequences. As realizations of the framework we
        provide linear-time algorithms of different complexity and capabilities using
        sorted arrays, tries and suffix trees as underlying data structures. Experiments
        on data sets from bioinformatics, text processing and computer security
        illustrate the efficiency of the proposed algorithms---enabling peak performances
        of up to 106 pairwise comparisons per second. The utility of distances and
        non-metric similarity measures for sequences as alternatives to string kernels is
        demonstrated in applications of text categorization, network intrusion detection
        and transcription site recognition in DNA},
  issn = {1532-4435},
  url = {http://portal.acm.org/citation.cfm?id=1390683},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jmlr08.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1396915
@inproceedings{1396915,
  title = {S/Kademlia: A practicable approach towards secure key-based routing},
  author = {Baumgart, Ingmar and Mies, Sebastian},
  booktitle = {ICPADS '07: Proceedings of the 13th International Conference on Parallel and
        Distributed Systems},
  organization = {IEEE Computer Society},
  year = {2007},
  address = {Washington, DC, USA},
  pages = {1--8},
  publisher = {IEEE Computer Society},
  abstract = {Security is a common problem in completely decentralized peer-to-peer
        systems. Although several suggestions exist on how to create a secure key-based
        routing protocol, a practicable approach is still unattended. In this paper we
        introduce a secure key-based routing protocol based on Kademlia that has a high
        resilience against common attacks by using parallel lookups over multiple
        disjoint paths, limiting free nodeId generation with crypto puzzles and
        introducing a reliable sibling broadcast. The latter is needed to store data in a
        safe replicated way. We evaluate the security of our proposed extensions to the
        Kademlia protocol analytically and simulate the effects of multiple disjoint
        paths on lookup success under the influence of adversarial nodes},
  isbn = {978-1-4244-1889-3},
  doi = {10.1109/ICPADS.2007.4447808},
  url = {http://portal.acm.org/citation.cfm?id=1396915},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SKademlia2007.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
1424615
@conference{1424615,
  title = {Trust-Rated Authentication for Domain-Structured Distributed Systems},
  author = {Holz, Ralph and Niedermayer, Heiko and Hauck, Peter and Carle, Georg},
  booktitle = {EuroPKI '08: Proceedings of the 5th European PKI workshop on Public Key
        Infrastructure},
  organization = {Springer-Verlag},
  year = {2008},
  address = {Berlin, Heidelberg},
  pages = {74--88},
  publisher = {Springer-Verlag},
  abstract = {We present an authentication scheme and new protocol for domain-based
        scenarios with inter-domain authentication. Our protocol is primarily intended
        for domain-structured Peer-to-Peer systems but is applicable for any domain
        scenario where clients from different domains wish to authenticate to each other.
        To this end, we make use of Trusted Third Parties in the form of Domain
        Authentication Servers in each domain. These act on behalf of their clients,
        resulting in a four-party protocol. If there is a secure channel between the
        Domain Authentication Servers, our protocol can provide secure authentication. To
        address the case where domains do not have a secure channel between them, we
        extend our scheme with the concept of trust-rating. Domain Authentication Servers
        signal security-relevant information to their clients (pre-existing secure
        channel or not, trust, ...). The clients evaluate this information to decide if
        it fits the security requirements of their application},
  www_section = {authentication, distributed systems, P2P, PKI, trust},
  isbn = {978-3-540-69484-7},
  doi = {10.1007/978-3-540-69485-4},
  url = {http://www.springerlink.com/content/k6786282r5378k42/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AuthenticationEuroPKI2008.pdf},
}
1456474
@conference{1456474,
  title = {{Tahoe}: the least-authority filesystem},
  author = {Wilcox-O'Hearn, Zooko and Warner, Brian},
  booktitle = {StorageSS '08: Proceedings of the 4th ACM international workshop on Storage
        security and survivability},
  organization = {ACM},
  year = {2008},
  address = {New York, NY, USA},
  pages = {21--26},
  publisher = {ACM},
  abstract = {Tahoe is a system for secure, distributed storage. It uses capabilities for
        access control, cryptography for confidentiality and integrity, and erasure
        coding for fault-tolerance. It has been deployed in a commercial backup service
        and is currently operational. The implementation is Open Source},
  www_section = {capabilities, fault-tolerance, P2P},
  isbn = {978-1-60558-299-3},
  doi = {10.1145/1456469.1456474},
  url = {http://portal.acm.org/citation.cfm?id=1456474},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lafs.pdf},
}
1461118
@article{1461118,
  title = {Shortest-path routing in randomized {DHT-based} {Peer-to-Peer} systems},
  author = {Wang, Chih-Chiang and Harfoush, Khaled},
  journal = {Comput. Netw},
  volume = {52},
  number = {18},
  year = {2008},
  address = {New York, NY, USA},
  pages = {3307--3317},
  publisher = {Elsevier North-Holland, Inc},
  abstract = {Randomized DHT-based Peer-to-Peer (P2P) systems grant nodes certain
        flexibility in selecting their overlay neighbors, leading to irregular overlay
        structures but to better overall performance in terms of path latency, static
        resilience and local convergence. However, routing in the presence of overlay
        irregularity is challenging. In this paper, we propose a novel routing protocol,
        RASTER, that approximates shortest overlay routes between nodes in randomized
        DHTs. Unlike previously proposed routing protocols, RASTER encodes and aggregates
        routing information. Its simple bitmap-encoding scheme together with the proposed
        RASTER routing algorithm enable a performance edge over current overlay routing
        protocols. RASTER provides a forwarding overhead of merely a small constant
        number of bitwise operations, a routing performance close to optimal, and a
        better resilience to churn. RASTER also provides nodes with the flexibility to
        adjust the size of the maintained routing information based on their
        storage/processing capabilities. The cost of storing and exchanging encoded
        routing information is manageable and grows logarithmically with the number of
        nodes in the system},
  www_section = {distributed hash table, P2P, routing},
  issn = {1389-1286},
  doi = {10.1016/j.comnet.2008.07.014},
  url = {http://portal.acm.org/citation.cfm?id=1461118},
}
15043
@article{15043,
  title = {Revised report on the algorithmic language {Scheme}},
  author = {Rees, Jonathan and Clinger, William and Kelsey, Richard},
  journal = {SIGPLAN Not},
  volume = {21},
  number = {12},
  year = {1986},
  address = {New York, NY, USA},
  pages = {37--79},
  publisher = {ACM},
  abstract = {The report gives a defining description of the programming language Scheme.
        Scheme is a statically scoped and properly tail-recursive dialect of the Lisp
        programming language invented by Guy Lewis Steele Jr. and Gerald Jay Sussman. It
        was designed to have an exceptionally clear and simple semantics and few
        different ways to form expressions. A wide variety of programming paradigms,
        including imperative, functional, and message passing styles, find convenient
        expression in Scheme. The introduction offers a brief history of the language and
        of the report. The first three chapters present the fundamental ideas of the
        language and describe the notational conventions used for describing the language
        and for writing programs in the language},
  issn = {0362-1340},
  doi = {10.1145/15042.15043},
  url = {http://en.scientificcommons.org/42347723},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/r5rs.pdf},
  www_section = {Unsorted},
}
1524297
@conference{1524297,
  title = {Query Forwarding Algorithm Supporting Initiator Anonymity in {GNUnet}},
  author = {Tatara, Kohei and Hori, Y. and Sakurai, Kouichi},
  booktitle = {Parallel and Distributed Systems, 2005. Proceedings. 11th International
        Conference on},
  volume = {2},
  year = {2005},
  month = jul,
  pages = {235--239},
  abstract = {Anonymity in peer-to-peer network means that it is difficult to associate a
        particular communication with a sender or a recipient. Recently, anonymous
        peer-to-peer framework, called GNUnet, was developed. A primary feature of GNUnet
        is resistance to traffic-analysis. However, Kugler analyzed a routing protocol in
        GNUnet, and pointed out traceability of initiator. In this paper, we propose an
        alternative routing protocol applicable in GNUnet, which is resistant to Kugler's
        shortcut attacks},
  www_section = {anonymity, GNUnet, routing, shortcut},
  issn = {1521-9097},
  doi = {10.1109/ICPADS.2005.246},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kugler2.pdf},
}
1551621
@conference{1551621,
  title = {Maintaining reference graphs of globally accessible objects in fully
        decentralized distributed systems},
  author = {Saballus, Bjoern and Fuhrmann, Thomas},
  booktitle = {HPDC '09: Proceedings of the 18th ACM international symposium on High
        performance distributed computing},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  pages = {59--60},
  publisher = {ACM},
  abstract = {Since the advent of electronic computing, the processors' clock speed has
        risen tremendously. Now that energy efficiency requirements have stopped that
        trend, the number of processing cores per machine started to rise. In near
        future, these cores will become more specialized, and their inter-connections
        will form complex networks, both on-chip and beyond. This trend opens new fields
        of applications for high performance computing: Heterogeneous architectures offer
        different functionalities and thus support a wider range of applications. The
        increased compute power of these systems allows more complex simulations and
        numerical computations. Falling costs enable even small companies to invest in
        multi-core systems and clusters. However, the growing complexity might impede
        this growth. Imagine a cluster of thousands of interconnected heterogeneous
        processor cores. A software developer will need a deep knowledge about the
        underlying infrastructure as well as the data and communication dependencies in
        her application to partition it optimally across the available cores. Moreover, a
        predetermined partitioning scheme cannot reflect failing processors or
        additionally provided resources. In our poster, we introduce J-Cell, a project
        that aims at simplifying high performance distributed computing. J-Cell offers a
        single system image, which allows applications to run transparently on
        heterogeneous multi-core machines. It distributes code, objects and threads onto
        the compute resources which may be added or removed at run-time. This dynamic
        property leads to an ad-hoc network of processors and cores. In this network, a
        fully decentralized object localization and retrieval algorithm guarantees the
        access to distributed shared objects},
  www_section = {globally accessible objects, single system image},
  isbn = {978-1-60558-587-1},
  doi = {10.1145/1551609.1551621},
  url = {http://portal.acm.org/citation.cfm?id=1551609.1551621},
}
1582481
@conference{1582481,
  title = {Using link-layer broadcast to improve scalable source routing},
  author = {Di, Pengfei and Fuhrmann, Thomas},
  booktitle = {IWCMC '09: Proceedings of the 2009 International Conference on Wireless
        Communications and Mobile Computing},
  organization = {ACM},
  year = {2009},
  month = jan,
  address = {New York, NY, USA},
  pages = {466--471},
  publisher = {ACM},
  abstract = {Scalable source routing (SSR) is a network layer routing protocol that
        provides services that are similar to those of structured peer-to-peer overlays.
        In this paper, we describe several improvements to the SSR protocol. They aim at
        providing nodes with more up-to-date routing information: 1. The use of
        link-layer broadcast enables all neighbors of a node to contribute to the
        forwarding process. 2. A light-weight and fast selection mechanism avoids packet
        duplication and optimizes the source route iteratively. 3. Nodes implicitly learn
        the network's topology from overheard broadcast messages. We present simulation
        results which show the performance gain of the proposed improvements: 1. The
        delivery ratio in settings with high mobility increases. 2. The required per-node
        state can be reduced as compared with the original SSR protocol. 3. The route
        stretch decreases. --- These improvements are achieved without increasing the
        routing overhead},
  www_section = {mobile Ad-hoc networks, P2P, routing, scalable source routing},
  isbn = {978-1-60558-569-7},
  doi = {10.1145/1582379.1582481},
  url = {http://portal.acm.org/citation.cfm?id=1582481},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09broadcastssr.pdf},
}
1590633
@conference{1590633,
  title = {Wireless Sensor Networks: A Survey},
  author = {Potdar, Vidyasagar and Sharif, Atif and Chang, Elizabeth},
  booktitle = {WAINA '09: Proceedings of the 2009 International Conference on Advanced
        Information Networking and Applications Workshops},
  organization = {IEEE Computer Society},
  year = {2009},
  address = {Washington, DC, USA},
  pages = {636--641},
  publisher = {IEEE Computer Society},
  abstract = {Wireless Sensor Networks (WSN), an element of pervasive computing, are
        presently being used on a large scale to monitor real-time environmental status.
        However these sensors operate under extreme energy constraints and are designed
        by keeping an application in mind. Designing a new wireless sensor node is
        extremely challenging task and involves assessing a number of different
        parameters required by the target application, which includes range, antenna
        type, target technology, components, memory, storage, power, life time, security,
        computational capability, communication technology, power, size, programming
        interface and applications. This paper analyses commercially (and research
        prototypes) available wireless sensor nodes based on these parameters and
        outlines research directions in this area},
  www_section = {FPGA, wireless sensor network},
  isbn = {978-0-7695-3639-2},
  doi = {10.1109/WAINA.2009.192},
  url = {http://portal.acm.org/citation.cfm?id=1588304.1590633},
}
1646697
@article{1646697,
  title = {Improving delivery ratios for application layer multicast in mobile ad hoc
        networks},
  author = {Baumung, Peter and Zitterbart, Martina and Kutzner, Kendy},
  journal = {Comput. Commun},
  volume = {28},
  number = {14},
  year = {2005},
  address = {Newton, MA, USA},
  pages = {1669--1679},
  publisher = {Butterworth-Heinemann},
  abstract = {Delivering multicast data using application layer approaches offers different
        advantages, as group members communicate using so-called overlay networks. These
        consist of a multicast group's members connected by unicast tunnels. Since
        existing approaches for application layer delivery of multicast data in mobile ad
        hoc networks (short MANETs) only deal with routing but not with error recovery,
        this paper evaluates tailored mechanisms for handling packet losses and congested
        networks. Although illustrated at the example of a specific protocol, the
        mechanisms may be applied to arbitrary overlays. This paper also investigates how
        application layer functionality based on overlay networks can turn existing
        multicast routing protocols (like ODMRP, M-AODV,...) into (almost) reliable
        transport protocols},
  www_section = {mobile Ad-hoc networks, multicast, reliability},
  issn = {0140-3664},
  doi = {10.1016/j.comcom.2005.02.008},
  url = {http://portal.acm.org/citation.cfm?id=1646697},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.68.5832.pdf},
}
1656984
@conference{1656984,
  title = {Heterogeneous gossip},
  author = {Frey, Davide and Guerraoui, Rachid and Kermarrec, Anne-Marie and Koldehofe,
        Boris and Mogensen, Martin and Monod, Maxime and Qu{\'e}ma, Vivien},
  booktitle = {Middleware '09: Proceedings of the 10th ACM/IFIP/USENIX International
        Conference on Middleware},
  organization = {Springer-Verlag New York, Inc},
  year = {2009},
  address = {New York, NY, USA},
  pages = {1--20},
  publisher = {Springer-Verlag New York, Inc},
  abstract = {Gossip-based information dissemination protocols are considered easy to
        deploy, scalable and resilient to network dynamics. Load-balancing is inherent in
        these protocols as the dissemination work is evenly spread among all nodes. Yet,
        large-scale distributed systems are usually heterogeneous with respect to network
        capabilities such as bandwidth. In practice, a blind load-balancing strategy
        might significantly hamper the performance of the gossip dissemination. This
        paper presents HEAP, HEterogeneity-Aware gossip Protocol, where nodes dynamically
        adapt their contribution to the gossip dissemination according to their bandwidth
        capabilities. Using a continuous, itself gossip-based, approximation of relative
        bandwidth capabilities, HEAP dynamically leverages the most capable nodes by
        increasing their fanout, while decreasing by the same proportion that of less
        capable nodes. HEAP preserves the simple and proactive (churn adaptation) nature
        of gossip, while significantly improving its effectiveness. We extensively
        evaluate HEAP in the context of a video streaming application on a testbed of 270
        PlanetLab nodes. Our results show that HEAP significantly improves the quality of
        the streaming over standard homogeneous gossip protocols, especially when the
        stream rate is close to the average available bandwidth},
  www_section = {heterogeneity, load balancing},
  url = {http://portal.acm.org/citation.cfm?id=1656984},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/middleware-monod.pdf},
}
1658999
@conference{1658999,
  title = {Scalable landmark flooding: a scalable routing protocol for {WSNs}},
  author = {Di, Pengfei and Fuhrmann, Thomas},
  booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student
        workshop on Emerging networking experiments and technologies},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  pages = {1--2},
  publisher = {ACM},
  abstract = {Wireless sensor networks (WSNs) are about to become a popular and inexpensive
        tool for all kinds of applications. More advanced applications also need
        end-to-end routing, which goes beyond the simple data dissemination and
        collection mechanisms of early WSNs. The special properties of WSNs -- scarce
        memory, CPU, and energy resources -- make this a challenge. The Dynamic Address
        Routing protocol (DART) could be a good candidate for WSN routing, if it were not
        so prone to link outages. In this paper, we propose Scalable Landmark Flooding
        (SLF), a new routing protocol for large WSNs. It combines ideas from landmark
        routing, flooding, and dynamic address routing. SLF is robust against link and
        node outages, requires only little routing state, and generates low maintenance
        traffic overhead},
  www_section = {wireless sensor network},
  isbn = {978-1-60558-751-6},
  doi = {10.1145/1658997.1658999},
  url = {http://portal.acm.org/citation.cfm?id=1658997.1658999},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di09slf.pdf},
}
1659021
@conference{1659021,
  title = {{Bloom} filters and overlays for routing in pocket switched networks},
  author = {Mayer, Christoph P.},
  booktitle = {Co-Next Student Workshop '09: Proceedings of the 5th international student
        workshop on Emerging networking experiments and technologies},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  pages = {43--44},
  publisher = {ACM},
  abstract = {Pocket Switched Networks (PSN) [3] have become a promising approach for
        providing communication between scarcely connected human-carried devices. Such
        devices, e.g. mobile phones or sensor nodes, are exposed to human mobility and
        can therewith leverage inter-human contacts for store-and-forward routing.
        Efficiently routing in such delay tolerant networks is complex due to incomplete
        knowledge about the network, and high dynamics of the network. In this work we
        want to develop an extension of Bloom filters for resource-efficient routing in
        pocket switched networks. Furthermore, we argue that PSNs may become densely
        populated in special situations. We want to exploit such situations to perform
        collaborative calculations of forwarding-decisions. In this paper we present a
        simple scheme for distributed decision calculation using overlays and a DHT-based
        distributed variant of Bloom filters},
  www_section = {Bloom filter, overlay networks, pocket switched network},
  isbn = {978-1-60558-751-6},
  doi = {10.1145/1658997.1659021},
  url = {http://portal.acm.org/citation.cfm?doid=1658997.1659021},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/conext09-phdworkshop-cameraready.pdf},
}
1667071
@article{1667071,
  title = {Privacy-preserving similarity-based text retrieval},
  author = {Pang, Hweehwa and Shen, Jialie and Krishnan, Ramayya},
  journal = {ACM Trans. Internet Technol},
  volume = {10},
  number = {1},
  year = {2010},
  address = {New York, NY, USA},
  pages = {1--39},
  publisher = {ACM},
  abstract = {Users of online services are increasingly wary that their activities could
        disclose confidential information on their business or personal activities. It
        would be desirable for an online document service to perform text retrieval for
        users, while protecting the privacy of their activities. In this article, we
        introduce a privacy-preserving, similarity-based text retrieval scheme that (a)
        prevents the server from accurately reconstructing the term composition of
        queries and documents, and (b) anonymizes the search results from unauthorized
        observers. At the same time, our scheme preserves the relevance-ranking of the
        search server, and enables accounting of the number of documents that each user
        opens. The effectiveness of the scheme is verified empirically with two real text
        corpora},
  www_section = {keywords, privacy, search, text mining},
  issn = {1533-5399},
  doi = {10.1145/1667067.1667071},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/privacy_preserving_similarity.pdf},
  url = {https://bibliography.gnunet.org},
}
1672334
@article{1672334,
  title = {Reconnecting the internet with ariba: self-organizing provisioning of end-to-end
        connectivity in heterogeneous networks},
  author = {H{\"u}bsch, Christian and Mayer, Christoph P. and Mies, Sebastian and Bless,
        Roland and Waldhorst, Oliver and Zitterbart, Martina},
  journal = {SIGCOMM Comput. Commun. Rev},
  volume = {40},
  number = {1},
  year = {2010},
  address = {New York, NY, USA},
  pages = {131--132},
  publisher = {ACM},
  abstract = {End-to-End connectivity in today's Internet can no longer be taken for
        granted. Middleboxes, mobility, and protocol heterogeneity complicate application
        development and often result in application-specific solutions. In our demo we
        present ariba: an overlay-based approach to handle such network challenges and to
        provide consistent homogeneous network primitives in order to ease application
        and service development},
  www_section = {heterogeneity, overlay networks, P2P},
  issn = {0146-4833},
  doi = {10.1145/1672308.1672334},
  url = {http://portal.acm.org/citation.cfm?doid=1672308.1672334},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p131-v40n1n-huebschA.pdf},
}
1698181
@techreport{1698181,
  title = {Routing with {Byzantine} robustness},
  author = {Perlman, Radia},
  institution = {Sun Microsystems, Inc},
  year = {2005},
  address = {Mountain View, CA, USA},
  abstract = {This paper describes how a network can continue to function in the presence
        of Byzantine failures. A Byzantine failure is one in which a node, instead of
        halting (as it would in a fail-stop failure), continues to operate, but
        incorrectly. It might lie about routing information, perform the routing
        algorithm itself flawlessly, but then fail to forward some class of packets
        correctly, or flood the network with garbage traffic. Our goal is to design a
        network so that as long as one nonfaulty path connects nonfaulty nodes A and B,
        they will be able to communicate, with some fair share of bandwidth, even if all
        the other components in the network are maximally malicious. We review work from
        1988 that presented a network design that had that property, but required the
        network to be small enough so that every router could keep state proportional to
        n2, where n is the total number of nodes in the network. This would work for a
        network of size on the order of a thousand nodes, but to build a large network,
        we need to introduce hierarchy. This paper presents a new design, building on the
        original work, that works with hierarchical networks. This design not only
        defends against malicious routers, but because it guarantees fair allocation of
        resources, can mitigate against many other types of denial of service attacks},
  www_section = {routing},
  url = {http://portal.acm.org/citation.cfm?id=1698181},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/smli_tr-2005-146.pdf},
}
1759877
@conference{1759877,
  title = {{CFR}: a peer-to-peer collaborative file repository system},
  author = {Lin, Meng-Ru and Lu, Ssu-Hsuan and Ho, Tsung-Hsuan and Lin, Peter and Chung,
        Yeh-Ching},
  booktitle = {GPC'07: Proceedings of the 2nd international conference on Advances in grid
        and pervasive computing},
  organization = {Springer-Verlag},
  year = {2007},
  address = {Berlin, Heidelberg},
  pages = {100--111},
  publisher = {Springer-Verlag},
  abstract = {Due to the high availability of the Internet, many large cross-organization
        collaboration projects, such as SourceForge, grid systems etc., have emerged. One
        of the fundamental requirements of these collaboration efforts is a storage
        system to store and exchange data. This storage system must be highly scalable
        and can efficiently aggregate the storage resources contributed by the
        participating organizations to deliver good performance for users. In this paper,
        we propose a storage system, Collaborative File Repository (CFR), for large scale
        collaboration projects. CFR uses peer-to-peer techniques to achieve scalability,
        efficiency, and ease of management. In CFR, storage nodes contributed by the
        participating organizations are partitioned according to geographical regions.
        Files stored in CFR are automatically replicated to all regions. Furthermore,
        popular files are duplicated to other storage nodes of the same region. By doing
        so, data transfers between users and storage nodes are confined within their
        regions and transfer efficiency is enhanced. Experiments show that our
        replication can achieve high efficiency with a small number of duplicates},
  www_section = {P2P, storage},
  isbn = {978-3-540-72359-2},
  url = {http://portal.acm.org/citation.cfm?id=1759877},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.7110.pdf},
}
1827424
@conference{1827424,
  title = {{Cordies}: expressive event correlation in distributed systems},
  author = {Koch, Gerald G. and Koldehofe, Boris and Rothermel, Kurt},
  booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on
        Distributed Event-Based Systems},
  organization = {ACM},
  year = {2010},
  address = {New York, NY, USA},
  pages = {26--37},
  publisher = {ACM},
  abstract = {Complex Event Processing (CEP) is the method of choice for the observation of
        system states and situations by means of events. A number of systems have been
        introduced that provide CEP in selected environments. Some are restricted to
        centralised systems, or to systems with synchronous communication, or to a
        limited space of event relations that are defined in advance. Many modern
        systems, though, are inherently distributed and asynchronous, and require a more
        powerful CEP. We present Cordies, a distributed system for the detection of
        correlated events that is designed for the operation in large-scale,
        heterogeneous networks and adapts dynamically to changing network conditions.
        With its expressive language to describe event relations, it is suitable for
        environments where neither the event space nor the situations of interest are
        predefined but are constantly adapted. In addition, Cordies supports
        Quality-of-Service (QoS) for communication in distributed event correlation
        detection},
  www_section = {QoS},
  isbn = {978-1-60558-927-5},
  doi = {10.1145/1827418.1827424},
  url = {http://portal.acm.org/citation.cfm?id=1827424},
}
1827425
@conference{1827425,
  title = {Providing basic security mechanisms in broker-less publish/subscribe systems},
  author = {Tariq, Muhammad Adnan and Koldehofe, Boris and Altaweel, Ala and Rothermel,
        Kurt},
  booktitle = {DEBS '10: Proceedings of the Fourth ACM International Conference on
        Distributed Event-Based Systems},
  organization = {ACM},
  year = {2010},
  address = {New York, NY, USA},
  pages = {38--49},
  publisher = {ACM},
  abstract = {The provisioning of basic security mechanisms such as authentication and
        confidentiality is highly challenging in a content-based publish/subscribe
        system. Authentication of publishers and subscribers is difficult to achieve due
        to the loose coupling of publishers and subscribers. Similarly, confidentiality
        of events and subscriptions conflicts with content-based routing. In particular,
        content-based approaches in broker-less environments do not address
        confidentiality at all. This paper presents a novel approach to provide
        confidentiality and authentication in a broker-less content-based
        publish-subscribe system. The authentication of publishers and subscribers as
        well as confidentiality of events is ensured, by adapting the pairing-based
        cryptography mechanisms, to the needs of a publish/subscribe system. Furthermore,
        an algorithm to cluster subscribers according to their subscriptions preserves a
        weak notion of subscription confidentiality. Our approach provides fine grained
        key management and the cost for encryption, decryption and routing is in the
        order of subscribed attributes. Moreover, the simulation results verify that
        supporting security is affordable with respect to the cost for overlay
        construction and event dissemination latencies, thus preserving scalability of
        the system},
  www_section = {P2P, publish/subscribe},
  isbn = {978-1-60558-927-5},
  doi = {10.1145/1827418.1827425},
  url = {http://portal.acm.org/citation.cfm?id=1827418.1827425},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DIP_2872.pdf},
}
1944
@book{1944,
  title = {The Theory of Games and Economic Behavior},
  author = {von Neumann, John and Morgenstern, Oskar},
  organization = {Princeton University Press},
  year = {1944},
  address = {Princeton, New Jersey, USA},
  edition = {Sixtieth},
  pages = {0--776},
  publisher = {Princeton University Press},
  www_section = {economic behavior, games, theory},
  isbn = {978-0-691-13061-3},
  url = {http://www.archive.org/details/theoryofgamesand030098mbp},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Neumann\%20\%26\%20Morgenstern\%20-\%20Theory\%20of\%20Games\%20and\%20Economic\%20Behavior.pdf},
}
1950
@article{1950,
  title = {Equilibrium points in n-person games},
  author = {Nash, Jr., John F.},
  journal = {PNAS. Proceedings of the National Academy of Sciences of the USA},
  volume = {36},
  year = {1950},
  month = jan,
  pages = {48--49},
  abstract = {One may define a concept of an n-person game in which each player has a
        finite set of pure strategies and in which a definite set of payments to the n
        players corresponds to each n-tuple of pure strategies, one strategy being taken
        for each player. For mixed strategies, which are probability distributions over
        the pure strategies, the pay-off functions are the expectations of the players,
        thus becoming polylinear forms},
  www_section = {n-person game, strategy},
  doi = {10.1073/pnas.36.1.48},
  url = {https://bibliography.gnunet.org},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20-\%20Nash\%20-\%20Equilibrium\%20points\%20in\%20n-person\%20games.pdf},
}
1959
@article{1959,
  title = {On Random Graphs {I}}, 
  author = {Erd{\H o}s, Paul and R{\'e}nyi, Alfr{\'e}d}, 
  journal = {Publicationes Mathematicae (Debrecen)}, 
  volume = {6}, 
  year = {1959}, 
  month = {January}, 
  pages = {290--297}, 
  www_section = {graphs, random, random graphs}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Erd\%C5\%91s\%20\%26\%20R\%C3\%A9nyi\%20-\%20On\%20Random\%20Graphs.pdf},
}
1962
@article{1962,
  title = {Low-density parity-check codes}, 
  author = {Gallager, Robert G.}, 
  journal = {Information Theory, IRE Transactions on}, 
  volume = {8}, 
  number = {1}, 
  year = {1962}, 
  pages = {21--28}, 
  abstract = {A low-density parity-check code is a code specified by a parity-check matrix
        with the following properties: each column contains a small fixed number
        $j \geq 3$ of 1's and each row contains a small fixed number $k > j$ of 1's. The
        typical minimum distance of these codes increases linearly with block length for
        a fixed rate and fixed $j$. When used with maximum likelihood decoding on a
        sufficiently quiet binary-input symmetric channel, the typical probability of
        decoding error decreases exponentially with block length for a fixed rate and
        fixed $j$. A simple but nonoptimum decoding scheme operating directly from the
        channel a posteriori probabilities is described. Both the equipment complexity
        and the data-handling capacity in bits per second of this decoder increase
        approximately linearly with block length. For $j > 3$ and a sufficiently low
        rate, the probability of error using this decoder on a binary symmetric channel
        is shown to decrease at least exponentially with a root of the block length. Some
        experimental results show that the actual probability of decoding error is much
        smaller than this theoretical bound}, 
  www_section = {coding theory, low-density parity-check}, 
  issn = {0096-1000}, 
  doi = {10.1109/TIT.1962.1057683}, 
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1057683}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ldpc.pdf}, 
}
1968
@article{1968,
  title = {The Tragedy of the Commons}, 
  author = {Hardin, Garrett}, 
  journal = {Science}, 
  volume = {162}, 
  number = {3859}, 
  year = {1968}, 
  pages = {1243--1248}, 
  doi = {10.1126/science.162.3859.1243}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Science\%20-\%20Hardin\%20-\%20The\%20Tragedy\%20of\%20the\%20Commons.pdf},
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
1970_0
@article{1970_0,
  title = {An Efficient Heuristic Procedure for Partitioning Graphs}, 
  author = {Kernighan, Brian W. and Lin, Shen}, 
  journal = {The Bell System Technical Journal}, 
  volume = {49}, 
  year = {1970}, 
  month = {January}, 
  pages = {291--307}, 
  abstract = {We consider the problem of partitioning the nodes of a graph with costs on
        its edges into subsets of given sizes so as to minimize the sum of the costs on
        all edges cut. This problem arises in several physical situations- for example,
        in assigning the components of electronic circuits to circuit boards to minimize
        the number of connections between boards. This paper presents a heuristic method
        for partitioning arbitrary graphs which is both effective in finding optimal
        partitions, and fast enough to be practical in solving large problems}, 
  www_section = {heuristic method, partitioning graphs}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kernighan\%20\%26\%20Lin\%20-\%20An\%20Efficient\%20Heuristic\%20Procedure\%20for\%20Partitioning\%20Graphs\%250A.pdf},
  url = {https://bibliography.gnunet.org}, 
}
1970_1
@article{1970_1,
  title = {The market for ``lemons'': Quality uncertainty and the market mechanism}, 
  author = {Akerlof, George A.}, 
  journal = {The Quarterly Journal of Economics}, 
  volume = {84}, 
  year = {1970}, 
  month = {August}, 
  pages = {488--500}, 
  abstract = {I. Introduction, 488.--II. The model with automobiles as an example,
        489.--III. Examples and applications, 492.--IV. Counteracting institutions,
        499.--V. Conclusion, 500}, 
  url = {http://www.jstor.org/stable/1879431}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/akerlof.pdf}, 
  www_section = {Unsorted}, 
}
1971
@article{1971,
  author       = {Trivers, Robert L.}, 
  title        = {The Evolution of Reciprocal Altruism}, 
  journal      = {The Quarterly Review of Biology}, 
  volume       = {46}, 
  year         = {1971}, 
  month        = {March}, 
  pages        = {35--57}, 
  www_section  = {behavior, evolution, reciprocal altruism}, 
  url          = {http://www.jstor.org/pss/2822435}, 
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/Trivers\%20-\%20The\%20evolution\%20of\%20reciprocal\%20altruism.pdf},
  abstract     = {A model is presented to account for the natural selection of what is termed
        reciprocally altruistic behavior. The model shows how selection can operate
        against the cheater (non-reciprocator) in the system. Three instances of
        altruistic behavior are discussed, the evolution of which the model can explain:
        (1) behavior involved in cleaning symbioses; (2) warning cries in birds; and (3)
        human reciprocal altruism. Regarding human reciprocal altruism, it is shown that
        the details of the psychological system that regulates this altruism can be
        explained by the model. Specifically, friendship, dislike, moralistic aggression,
        gratitude, sympathy, trust, suspicion, trustworthiness, aspects of guilt, and
        some forms of dishonesty and hypocrisy can be explained as important adaptations
        to regulate the altruistic system. Each individual human is seen as possessing
        altruistic and cheating tendencies, the expression of which is sensitive to
        developmental variables that were selected to set the tendencies at a balance
        appropriate to the local social and ecological environment}, 
}
1977
@article{1977,
  title = {Towards a methodology for statistical disclosure control}, 
  author = {Dalenius, T.}, 
  journal = {Statistik Tidskrift}, 
  volume = {15}, 
  year = {1977}, 
  pages = {429--444}, 
  www_section = {database_privacy, differential_privacy, stat}, 
  url = {https://bibliography.gnunet.org}, 
}
1979
@inproceedings{1979,
  title = {Compact Encodings of List Structure}, 
  author = {Bobrow, Daniel G. and Clark, Douglas W.}, 
  booktitle = {Compact Encodings of List Structure}, 
  organization = {ACM New York, NY, USA}, 
  year = {1979}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {List structures provide a general mechanism for representing easily changed
        structured data, but can introduce inefficiencies in the use of space when fields
        of uniform size are used to contain pointers to data and to link the structure.
        Empirically determined regularity can be exploited to provide more
        space-efficient encodings without losing the flexibility inherent in list
        structures. The basic scheme is to provide compact pointer fields big enough to
        accommodate most values that occur in them and to provide
        {\textquotedblleft}escape{\textquotedblright} mechanisms for exceptional cases.
        Several examples of encoding designs are presented and evaluated, including two
        designs currently used in Lisp machines. Alternative escape mechanisms are
        described, and various questions of cost and implementation are discussed. In
        order to extrapolate our results to larger systems than those measured, we
        propose a model for the generation of list pointers and we test the model against
        data from two programs. We show that according to our model, list structures with
        compact cdr fields will, as address space grows, continue to be compacted well
        with a fixed-width small field. Our conclusion is that with a microcodable
        processor, about a factor of two gain in space efficiency for list structure can
        be had for little or no cost in processing time}, 
  doi = {10.1145/357073.357081}, 
  url = {http://portal.acm.org/citation.cfm?id=357081$\#$collab}, 
  www_section = {Unsorted}, 
}
1982
@inproceedings{1982,
  title = {Protocols for Secure Computations}, 
  author = {Yao, Andrew C.}, 
  booktitle = {Proceedings of the 23rd Annual Symposium on Foundations of Computer
        Science}, 
  organization = {IEEE Computer Society}, 
  year = {1982}, 
  address = {Washington, DC, USA}, 
  publisher = {IEEE Computer Society}, 
  doi = {10.1109/SFCS.1982.88}, 
  url = {http://dx.doi.org/10.1109/SFCS.1982.88}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ProtocolSecurecomputations1982Yao.pdf},
  www_section = {Unsorted}, 
}
1986
@inproceedings{1986,
  title = {Networks Without User Observability {\textemdash} Design Options}, 
  author = {Pfitzmann, Andreas and Waidner, Michael}, 
  booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT' 85}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {219}, 
  year = {1986}, 
  pages = {245--253}, 
  editor = {Pichler, Franz}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In usual communication networks, the network operator or an intruder could
        easily observe when, how much and with whom the users communicate (traffic
        analysis), even if the users employ end-to-end encryption. When ISDNs are used
        for almost everything, this becomes a severe threat. Therefore, we summarize
        basic concepts to keep the recipient and sender or at least their relationship
        unobservable, consider some possible implementations and necessary hierarchical
        extensions, and propose some suitable performance and reliability enhancements}, 
  isbn = {978-3-540-16468-5}, 
  doi = {10.1007/3-540-39805-8_29}, 
  url = {http://dx.doi.org/10.1007/3-540-39805-8_29}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetworkWithoutUserObservabiliy1985Pfitzmann.pdf},
  www_section = {Unsorted}, 
}
1987
@inproceedings{1987,
  title = {How to Play ANY Mental Game or A Completeness Theorem for Protocols with Honest
        Majority}, 
  author = {Goldreich, O. and Micali, S. and Wigderson, A.}, 
  booktitle = {Proceedings of the Nineteenth Annual ACM Symposium on Theory of Computing}, 
  organization = {ACM}, 
  year = {1987}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {We present a polynomial-time algorithm that, given as a input the description
        of a game with incomplete information and any number of players, produces a
        protocol for playing the game that leaks no partial information, provided the
        majority of the players is honest. Our algorithm automatically solves all the
        multi-party protocol problems addressed in complexity-based cryptography during
        the last 10 years. It actually is a completeness theorem for the class of
        distributed protocols with honest majority. Such completeness theorem is optimal
        in the sense that, if the majority of the players is not honest, some protocol
        problems have no efficient solution [C]}, 
  isbn = {0-89791-221-7}, 
  doi = {10.1145/28395.28420}, 
  url = {http://doi.acm.org/10.1145/28395.28420}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PlayMentalGame1987Goldreich.pdf},
  www_section = {Unsorted}, 
}
1988_0
@inproceedings{1988_0,
  title = {Completeness Theorems for Non-cryptographic Fault-tolerant Distributed
        Computation}, 
  author = {Ben-Or, Michael and Goldwasser, Shafi and Wigderson, Avi}, 
  booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing}, 
  organization = {ACM}, 
  year = {1988}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {Every function of n inputs can be efficiently computed by a complete network
        of n processors in such a way that: If no faults occur, no set of size t < n/2 of
        players gets any additional information (other than the function value), Even if
        Byzantine faults are allowed, no set of size t < n/3 can either disrupt the
        computation or get additional information. Furthermore, the above bounds on t are
        tight!}, 
  isbn = {0-89791-264-0}, 
  doi = {10.1145/62212.62213}, 
  url = {http://doi.acm.org/10.1145/62212.62213}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CompletelenessTheorems1988Ben-Or.pdf},
  www_section = {Unsorted}, 
}
1988_1
@inproceedings{1988_1,
  title = {Founding Cryptography on Oblivious Transfer}, 
  author = {Kilian, Joe}, 
  booktitle = {Proceedings of the Twentieth Annual ACM Symposium on Theory of Computing}, 
  organization = {ACM}, 
  year = {1988}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {Suppose your netmail is being erratically censored by Captain Yossarian.
        Whenever you send a message, he censors each bit of the message with probability
        1/2, replacing each censored bit by some reserved character. Well versed in such
        concepts as redundancy, this is no real problem to you. The question is, can it
        actually be turned around and used to your advantage? We answer this question
        strongly in the affirmative. We show that this protocol, more commonly known as
        oblivious transfer, can be used to simulate a more sophisticated protocol, known
        as oblivious circuit evaluation([Y]). We also show that with such a communication
        channel, one can have completely noninteractive zero-knowledge proofs of
        statements in NP. These results do not use any complexity-theoretic assumptions.
        We can show that they have applications to a variety of models in which oblivious
        transfer can be done}, 
  www_section = {oblivious circuits}, 
  isbn = {0-89791-264-0}, 
  doi = {10.1145/62212.62215}, 
  url = {http://doi.acm.org/10.1145/62212.62215}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oblivious_transfer.pdf},
}
1993_0
@article{1993_0,
  author       = {Gode, Dhananjay K. and Sunder, Shyam}, 
  title        = {Allocative Efficiency of Markets with Zero-Intelligence Traders: Market as a
        Partial Substitute for Individual Rationality}, 
  journal      = {Journal of Political Economy}, 
  volume       = {101}, 
  year         = {1993}, 
  month        = {February}, 
  pages        = {119--137}, 
  url          = {http://www.jstor.org/stable/2138676}, 
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/JPE\%20\%281993\%29\%20-\%20Gode\%20\%26\%20Sunder\%20-\%20Allocative\%20Efficiency.pdf},
  www_section  = {allocative efficiency, double auction, market, zero-intelligence trader}, 
  abstract     = {We report market experiments in which human traders are replaced by
        "zero-intelligence" programs that submit random bids and offers. Imposing a
        budget constraint (i.e., not permitting traders to sell below their costs or buy
        above their values) is sufficient to raise the allocative efficiency of these
        auctions close to 100 percent. Allocative efficiency of a double auction derives
        largely from its structure, independent of traders' motivation, intelligence, or
        learning. Adam Smith's invisible hand may be more powerful than some may have
        thought; it can generate aggregate rationality not only from individual
        rationality but also from individual irrationality}, 
}
1993_1
@book{1993_1,
  author       = {Menezes, Alfred J.}, 
  title        = {Elliptic Curve Public Key Cryptosystems}, 
  series       = {The Springer International Series in Engineering and Computer Science}, 
  volume       = {234}, 
  year         = {1993}, 
  pages        = {0--144}, 
  publisher    = {Springer}, 
  organization = {Springer}, 
  isbn         = {978-0-7923-9368-9}, 
  url          = {http://books.google.com/books/about/Elliptic_curve_public_key_cryptosystems.html?id=bIb54ShKS68C},
  www_section  = {algebraic geometry, elliptic curve cryptography, number theory, public key
        cryptosystem}, 
  abstract     = {Elliptic curves have been intensively studied in algebraic geometry and
        number theory. In recent years they have been used in devising efficient
        algorithms for factoring integers and primality proving, and in the construction
        of public key cryptosystems. Elliptic Curve Public Key Cryptosystems provides an
        up-to-date and self-contained treatment of elliptic curve-based public key
        cryptology. Elliptic curve cryptosystems potentially provide equivalent security
        to the existing public key schemes, but with shorter key lengths. Having short
        key lengths means smaller bandwidth and memory requirements and can be a crucial
        factor in some applications, for example the design of smart card systems. The
        book examines various issues which arise in the secure and efficient
        implementation of elliptic curve systems. Elliptic Curve Public Key Cryptosystems
        is a valuable reference resource for researchers in academia, government and
        industry who are concerned with issues of data security. Because of the
        comprehensive treatment, the book is also suitable for use as a text for advanced
        courses on the subject}, 
}
1997_0
@inproceedings{1997_0,
  title = {Privacy-enhancing Technologies for the Internet}, 
  author = {Ian Goldberg and David Wagner and Eric Brewer}, 
  booktitle = {Compcon '97. Proceedings, IEEE}, 
  organization = {IEEE Computer Society}, 
  year = {1997}, 
  month = {February}, 
  address = {San Jose, CA, United States}, 
  publisher = {IEEE Computer Society}, 
  abstract = {The increased use of the Internet for everyday activities is bringing new
        threats to personal privacy. This paper gives an overview of existing and
        potential privacy-enhancing technologies for the Internet, as well as motivation
        and challenges for future work in this field}, 
  www_section = {Internet, privacy, privacy-enhancing technology}, 
  isbn = {0818678046}, 
  url = {http://www.cs.berkeley.edu/~daw/papers/privacy-compcon97-www/privacy-html.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Compcon\%20\%2797\%20-\%20Privacy-enhancing\%20Technologies\%20for\%20the\%20Internet.pdf},
}
1997_1
@article{1997_1,
  title = {A Reliable Multicast Framework for Light-weight Sessions and Application Level
        Framing}, 
  author = {Floyd, Sally and Jacobson, Van and Liu, Ching-Gung and McCanne, Steven and
        Zhang, Lixia}, 
  journal = {IEEE/ACM Transactions on Networking}, 
  volume = {5}, 
  year = {1997}, 
  pages = {784--803}, 
  abstract = {This paper describes SRM (Scalable Reliable Multicast), a reliable multicast
        framework for light-weight sessions and application level framing. The algorithms
        of this framework are efficient, robust, and scale well to both very large
        networks and very large sessions. The SRM framework has been prototyped in wb, a
        distributed whiteboard application, which has been used on a global scale with
        sessions ranging from a few to a few hundred participants. The paper describes
        the principles that have guided the SRM design, including the IP multicast group
        delivery model, an end-to-end, receiver-based model of reliability, and the
        application level framing protocol model. As with unicast communications, the
        performance of a reliable multicast delivery algorithm depends on the underlying
        topology and operational environment. We investigate that dependence via analysis
        and simulation, and demonstrate an adaptive algorithm that uses the results of
        previous loss recovery events to adapt the control parameters used for future
        loss recovery. With the adaptive algorithm, our reliable multicast delivery
        algorithm provides good performance over a wide range of underlying topologies}, 
  www_section = {computer network performance, computer networks, Internetworking}, 
  issn = {1063-6692}, 
  doi = {10.1109/90.650139}, 
  url = {http://dx.doi.org/10.1109/90.650139}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Reliable_MultiCast1997Flyod.pdf},
}
1999_0
@mastersthesis{1999_0,
  title = {A Distributed Decentralized Information Storage and Retrieval System}, 
  author = {Ian Clarke}, 
  school = {University of Edinburgh}, 
  volume = {PhD}, 
  internal-note = {NOTE(review): volume is not a standard thesis field and the value
        "PhD" conflicts with the @mastersthesis entry type --- presumably the degree type
        belongs in a type field instead; verify against the actual thesis record}, 
  year = {1999}, 
  abstract = {This report describes an algorithm which if executed by a group of
        interconnected nodes will provide a robust key-indexed information storage and
        retrieval system with no element of central control or administration. It allows
        information to be made available to a large group of people in a similar manner
        to the "World Wide Web". Improvements over this existing system include:--No
        central control or administration required--Anonymous information publication and
        retrieval--Dynamic duplication of popular information--Transfer of information
        location depending upon demand There is also potential for this system to be used
        in a modified form as an information publication system within a large
        organisation which may wish to utilise unused storage space which is distributed
        across the organisation. The system's reliability is not guaranteed, nor is its
        efficiency, however the intention is that the efficiency and reliability will be
        sufficient to make the system useful, and demonstrate that}, 
  url = {https://bibliography.gnunet.org}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
1999_1
@inproceedings{1999_1,
  title = {Public-key Cryptosystems Based on Composite Degree Residuosity Classes}, 
  author = {Paillier, Pascal}, 
  booktitle = {Proceedings of the 17th International Conference on Theory and Application
        of Cryptographic Techniques}, 
  organization = {Springer-Verlag}, 
  year = {1999}, 
  address = {Berlin, Heidelberg}, 
  publisher = {Springer-Verlag}, 
  abstract = {This paper investigates a novel computational problem, namely the Composite
        Residuosity Class Problem, and its applications to public-key cryptography. We
        propose a new trapdoor mechanism and derive from this technique three encryption
        schemes : a trapdoor permutation and two homomorphic probabilistic encryption
        schemes computationally comparable to RSA. Our cryptosystems, based on usual
        modular arithmetics, are provably secure under appropriate assumptions in the
        standard model}, 
  isbn = {3-540-65889-0}, 
  url = {http://dl.acm.org/citation.cfm?id=1756123.1756146}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PublicKeyCryptoSystems1999Paillier.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
200
@booklet{200,
  title = {Defending the Sybil Attack in P2P Networks: Taxonomy, Challenges, and a Proposal
        for Self-Registration}, 
  author = {Jochen Dinger and Hannes Hartenstein}, 
  howpublished = {DAS-P2P 2006}, 
  year = {2006}, 
  month = {April}, 
  publisher = {Institut fur Telematik, Universitat Karsruhe (TH), Germany}, 
  abstract = {The robustness of Peer-to-Peer (P2P) networks, in particular of DHT-based
        overlay networks, suffers significantly when a Sybil attack is performed. We
        tackle the issue of Sybil attacks from two sides. First, we clarify, analyze, and
        classify the P2P identifier assignment process. By clearly separating network
        participants from network nodes, two challenges of P2P networks under a Sybil
        attack become obvious: i) stability over time, and ii) identity differentiation.
        Second, as a starting point for a quantitative analysis of time-stability of P2P
        networks under Sybil attacks and under some assumptions with respect to identity
        differentiation, we propose an identity registration procedure called
        self-registration that makes use of the inherent distribution mechanisms of a P2P
        network}, 
  www_section = {attack, P2P, robustness}, 
  url = {http://dsn.tm.uni-karlsruhe.de/medien/publication-confs/dinger_dasp2p06_sybil.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.8756.pdf}, 
}
2000
@book{2000,
  title = {Trust-region methods}, 
  author = {Conn, Andrew R. and Gould, Nicholas I. M. and Toint, Philippe L.}, 
  organization = {Society for Industrial and Applied Mathematics and Mathematical
        Programming Society}, 
  year = {2000}, 
  address = {Philadelphia, PA}, 
  publisher = {Society for Industrial and Applied Mathematics and Mathematical Programming
        Society}, 
  series = {MPS-SIAM Series on Optimization}, 
  isbn = {978-0-89871-460-9}, 
  url = {http://books.google.com/books?hl=es\&lr=\&id=5kNC4fqssYQC\&oi=fnd\&pg=PR15\&dq=trust-region+methods\&ots=j1JMMQ3QJY\&sig=ncLlD3mqZ4KEQ1Z9V2qId4rNffo$\#$v=onepage\&q\&f=false},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2001_0
@article{2001_0,
  title = {Automated Negotiation: Prospects, Methods and Challenges}, 
  author = {Nicholas R Jennings and Peyman Faratin and Alessio R. Lomuscio and Simon
        Parsons and Carles Sierra and Michael Wooldridge}, 
  journal = {Group Decision and Negotiation}, 
  volume = {10}, 
  year = {2001}, 
  month = {March}, 
  pages = {199--215}, 
  abstract = {This paper is to examine the space of negotiation opportunities for
        autonomous agents, to identify and evaluate some of the key techniques, and to
        highlight some of the major challenges for future automated negotiation research.
        This paper is not meant as a survey of the field of automated negotiation.
        Rather, the descriptions and assessments of the various approaches are generally
        undertaken with particular reference to work in which the authors have been
        involved. However, the specific issues raised should be viewed as being broadly
        applicable}, 
  www_section = {automated negotiation, autonomous agent, negotiation}, 
  doi = {10.1023/A:1008746126376}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Group\%20Decision\%20and\%20Negociation\%20-\%20Automated\%20Negociation.pdf},
}
2001_1
@article{2001_1,
  title = {{DVD COPY CONTROL ASSOCIATION} vs. {ANDREW BUNNER}}, 
  author = {unknown}, 
  journal = {unknown}, 
  year = {2001}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2001_2
@inproceedings{2001_2,
  title = {A Generalisation, a Simplification and Some Applications of Paillier's
        Probabilistic Public-Key System}, 
  author = {Damg{\aa}rd, Ivan and Jurik, Mats}, 
  booktitle = {Proceedings of the 4th International Workshop on Practice and Theory in
        Public Key Cryptography: Public Key Cryptography}, 
  organization = {Springer-Verlag}, 
  year = {2001}, 
  address = {London, UK, UK}, 
  publisher = {Springer-Verlag}, 
  abstract = {We propose a generalisation of Paillier's probabilistic public key system, in
        which the expansion factor is reduced and which allows to adjust the block length
        of the scheme even after the public key has been fixed, without loosing the
        homomorphic property. We show that the generalisation is as secure as Paillier's
        original system. We construct a threshold variant of the generalised scheme as
        well as zero-knowledge protocols to show that a given ciphertext encrypts one of
        a set of given plaintexts, and protocols to verify multiplicative relations on
        plaintexts. We then show how these building blocks can be used for applying the
        scheme to efficient electronic voting.This reduces dramatically the work needed
        to compute the final result of an election, compared to the previously best known
        schemes.W e show how the basic scheme for a yes/no vote can be easily adapted to
        casting a vote for up to t out of L candidates. The same basic building blocks
        can also be adapted to provide receipt-free elections, under appropriate physical
        assumptions. The scheme for 1 out of L elections can be optimised such that for a
        certain range of parameter values, a ballot has size only O(log L) bits}, 
  isbn = {3-540-41658-7}, 
  url = {http://dl.acm.org/citation.cfm?id=648118.746742}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Generalisation2001Damgard.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2001_3
@inproceedings{2001_3,
  title = {Investigating the energy consumption of a wireless network interface in an ad
        hoc networking environment}, 
  author = {Feeney, Laura Marie and Nilsson, Martin}, 
  booktitle = {INFOCOM 2001. Twentieth Annual Joint Conference of the IEEE Computer and
        Communications Societies. Proceedings. IEEE}, 
  year = {2001}, 
  month = {April}, 
  address = {Anchorage, AK, USA}, 
  abstract = {Energy-aware design and evaluation of network protocols requires knowledge of
        the energy consumption behavior of actual wireless interfaces. But little
        practical information is available about the energy consumption behavior of
        well-known wireless network interfaces and device specifications do not provide
        information in a form that is helpful to protocol developers. This paper
        describes a series of experiments which obtained detailed measurements of the
        energy consumption of an IEEE 802.11 wireless network interface operating in an
        ad hoc networking environment. The data is presented as a collection of linear
        equations for calculating the energy consumed in sending, receiving and
        discarding broadcast and point-to-point data packets of various sizes. Some
        implications for protocol design and evaluation in ad hoc networks are
        discussed}, 
  www_section = {ad-hoc networks, energy consumption, IEEE 802.11}, 
  isbn = {0-7803-7016-3}, 
  doi = {10.1109/INFCOM.2001.916651}, 
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=916651}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infocom01investigating.pdf},
}
2001_4
@inproceedings{2001_4,
  title = {Multiparty Computation from Threshold Homomorphic Encryption}, 
  author = {Cramer, Ronald and Damg{\aa}rd, Ivan and Nielsen, Jesper B.}, 
  booktitle = {Advances in Cryptology {\textemdash} EUROCRYPT 2001}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {2045}, 
  year = {2001}, 
  pages = {280--300}, 
  editor = {Pfitzmann, Birgit}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We introduce a new approach to multiparty computation (MPC) basing it on
        homomorphic threshold crypto-systems. We show that given keys for any
        sufficiently efficient system of this type,general MPC protocols for n parties
        can be devised which are secure against an active adversary that corrupts any
        minority of the parties. The total number of bits broadcast is O(nk|C|),where k
        is the security parameter and |C| is the size of a (Boolean) circuit computing
        the function to be securely evaluated. An earlier proposal by Franklin and Haber
        with the same complexity was only secure for passive adversaries,while all
        earlier protocols with active security had complexity at least quadratic in n. We
        give two examples of threshold cryptosystems that can support our construction
        and lead to the claimed complexities}, 
  isbn = {978-3-540-42070-5}, 
  doi = {10.1007/3-540-44987-6_18}, 
  url = {http://dx.doi.org/10.1007/3-540-44987-6_18}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2001Cramer.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2001_5
@book{2001_5,
  title = {Peer-To-Peer: Harnessing the Power of Disruptive Technologies -- Chapter 12:
        Free Haven}, 
  author = {Roger Dingledine and Michael J. Freedman and David Molnar}, 
  organization = {O'Reilly Media}, 
  year = {2001}, 
  editor = {Andy Oram}, 
  publisher = {O'Reilly Media}, 
  abstract = {Description of the problems that arise when one tries to combine anonymity
        and accountability. Note that the Free Haven design described here charges for
        storing data in the network (downloads are free), whereas in GNUnet adding data
        is free and only the downloads are considered as utilization}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2001_6
@book{2001_6,
  title = {The Theory of Incentives: The Principal-Agent Model}, 
  author = {Jean-Jacques Laffont and David Martimort}, 
  organization = {Princeton University Press}, 
  year = {2001}, 
  address = {Princeton, New Jersey, USA}, 
  pages = {0--360}, 
  publisher = {Princeton University Press}, 
  abstract = {Economics has much to do with incentives--not least, incentives to work hard,
        to produce quality products, to study, to invest, and to save. Although Adam
        Smith amply confirmed this more than two hundred years ago in his analysis of
        sharecropping contracts, only in recent decades has a theory begun to emerge to
        place the topic at the heart of economic thinking. In this book, Jean-Jacques
        Laffont and David Martimort present the most thorough yet accessible introduction
        to incentives theory to date. Central to this theory is a simple question as
        pivotal to modern-day management as it is to economics research: What makes
        people act in a particular way in an economic or business situation? In seeking
        an answer, the authors provide the methodological tools to design institutions
        that can ensure good incentives for economic agents. This book focuses on the
        principal-agent model, the {\textquotedblleft}simple{\textquotedblright} situation where a principal, or company,
        delegates a task to a single agent through a contract--the essence of management
        and contract theory. How does the owner or manager of a firm align the objectives
        of its various members to maximize profits? Following a brief historical overview
        showing how the problem of incentives has come to the fore in the past two
        centuries, the authors devote the bulk of their work to exploring principal-agent
        models and various extensions thereof in light of three types of information
        problems: adverse selection, moral hazard, and non-verifiability. Offering an
        unprecedented look at a subject vital to industrial organization, labor
        economics, and behavioral economics, this book is set to become the definitive
        resource for students, researchers, and others who might find themselves
        pondering what contracts, and the incentives they embody, are really all about}, 
  www_section = {economics, principal-agent model}, 
  isbn = {9780691091846}, 
  url = {http://press.princeton.edu/chapters/i7311.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Laffont\%20\%26\%20Martimort\%20-\%20The\%20Theory\%20of\%20Incentives.pdf},
}
2002_0
@conference{2002_0,
  title = {Cooperative Backup System}, 
  author = {Sameh Elnikety and Mark Lillibridge and Mike Burrows and Willy Zwaenepoel}, 
  booktitle = {The USENIX Conf. on File and Storage Technologies}, 
  year = {2002}, 
  abstract = {This paper presents the design of a novel backup system built on top of a
        peer-to-peer architecture with minimal supporting infrastructure. The system can
        be deployed for both large-scale and small-scale peer-to-peer overlay networks.
        It allows computers connected to the Internet to back up their data
        cooperatively. Each computer has a set of partner computers and stores its backup
        data distributively among those partners. In return, it stores its partners'
        backup data, in such a way as to achieve both fault-tolerance and high
        reliability. This form of cooperation poses several
        interesting technical challenges because these computers have independent failure
        modes, do not trust each other, and are subject to third party attacks}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/elnikety.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2002_1
@conference{2002_1,
  title = {Finite-length analysis of low-density parity-check codes on the binary erasure
        channel}, 
  author = {Changyan Di and David Proietti and I. Emre Telatar and Thomas J. Richardson and
        R{\"u}diger L. Urbanke}, 
  booktitle = {Finite-length analysis of low-density parity-check codes on the binary
        erasure channel}, 
  year = {2002}, 
  month = {January}, 
  abstract = {In this paper, we are concerned with the finite-length analysis of
        low-density parity-check (LDPC) codes when used over the binary erasure channel
        (BEC). The main result is an expression for the exact average bit and block
        erasure probability for a given regular ensemble of LDPC codes when decoded
        iteratively. We also give expressions for upper bounds on the average bit and
        block erasure probability for regular LDPC ensembles and the standard random
        ensemble under maximum-likelihood (ML) decoding. Finally, we present what we
        consider to be the most important open problems in this area}, 
  www_section = {BEC, coding theory, low-density parity-check, maximum-likelihood}, 
  doi = {10.1109/TIT.2002.1003839}, 
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1003839}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Finite-length\%20analysis\%20of\%20low-density\%20parity-check\%20codes\%20on.pdf},
}
2002_2_GNet
@article{2002_2_GNet,
  title = {The GNet Whitepaper}, 
  author = {Krista Bennett and Tiberius Stef and Christian Grothoff and Tzvetan Horozov and
        Ioana Patrascu}, 
  journal = {unknown}, 
  institution = {Purdue University}, 
  year = {2002}, 
  month = {June}, 
  type = {Technical report}, 
  abstract = {This paper describes GNet, a reliable anonymous distributed backup system
        with reasonable defenses against malicious hosts and low overhead in traffic and
        CPU time. The system design is described and compared to other publicly used
        services with similar goals. Additionally, the implementation and the protocols
        of GNet are presented}, 
  www_section = {anonymity, economics, encoding, GNUnet, obsolete database}, 
  keywords = {anonymity, economics, encoding, GNUnet, obsolete database}, 
  www_tags = {selected}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main.pdf}, 
}
2002_3
@book{2002_3,
  title = {Mnemosyne: Peer-to-Peer Steganographic Storage}, 
  author = {Hand, Steven and Roscoe, Timothy}, 
  booktitle = {Peer-to-Peer Systems}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {2429}, 
  year = {2002}, 
  pages = {130--140}, 
  editor = {Druschel, Peter and Kaashoek, Frans and Rowstron, Antony}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  isbn = {978-3-540-44179-3}, 
  doi = {10.1007/3-540-45748-8_13}, 
  url = {http://dx.doi.org/10.1007/3-540-45748-8_13}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/107.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2003_0
@conference{2003_0,
  title = {Bootstrapping a Distributed Computational Economy with Peer-to-Peer Bartering}, 
  author = {Chun, Brent and Yun Fu and Vahdat, Amin}, 
  booktitle = {Proceedings of the 1st Workshop on Economics of Peer-to-Peer Systems}, 
  year = {2003}, 
  month = {June}, 
  address = {Berkeley, CA, USA}, 
  www_section = {bartering, distributed computational economies, peer-to-peer bartering,
        resource discovery, resource exchange, resource peering}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Workshop\%20on\%20Economics\%20of\%20P2P\%20Systems\%2703\%20-\%20Chun\%2C\%20Fu\%20\%26\%20Vahdat.pdf},
}
2003_1
@conference{2003_1,
  title = {The effect of rumor spreading in reputation systems for mobile ad-hoc networks}, 
  author = {Sonja Buchegger and Jean-Yves Le Boudec}, 
  booktitle = {Proceedings of WiOpt {\textquoteleft}03: Modeling and Optimization in
        Mobile, Ad Hoc and Wireless Networks}, 
  year = {2003}, 
  month = {March}, 
  address = {Sophia-Antipolis, France}, 
  abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and
        forwarding. For individual nodes there are however several advantages resulting
        from noncooperation, the most obvious being power saving. Nodes that act
        selfishly or even maliciously pose a threat to availability in mobile adhoc
        networks. Several approaches have been proposed to detect noncooperative nodes.
        In this paper, we investigate the effect of using rumors with respect to the
        detection time of misbehaved nodes as well as the robustness of the reputation
        system against wrong accusations. We propose a Bayesian approach for reputation
        representation, updates, and view integration. We also present a mechanism to
        detect and exclude potential lies. The simulation results indicate that by using
        this Bayesian approach, the reputation system is robust against slander while
        still benefitting from the speed-up in detection time provided by the use of
        rumors}, 
  www_section = {mobile Ad-hoc networks, reputation, reputation system, rumor}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WiOpt\%2703\%20-\%20Buchegger\%20\%26\%20Le\%20Boudec\%20-\%20Reputation\%20Systems.pdf},
}
2003_10
@article{2003_10,
  title = {On the Topology of Overlay-Networks}, 
  author = {Thomas Fuhrmann}, 
  journal = {unknown}, 
  year = {2003}, 
%%%%% ERROR: Non-ASCII characters: ''\x02''
  abstract = {Random-graph models are about to become an important tool in the study of
        wireless ad-hoc and sensor-networks, peer-to-peer networks, and, generally,
        overlay-networks. Such models provide a theoretical basis to assess the
        capabilities of certain networks, and guide the design of new protocols.
        Especially the recently proposed models for so-called small-world networks
        receive much attention from the networking community. This paper proposes the use
        of two more mathematical concepts for the analysis of network topologies,
        dimension and curvature. These concepts can intuitively be applied to, e.g.,
        sensor-networks. But they can also be sensibly defined for certain other
        random-graph models. The latter is non-trivial since such models may describe
        purely virtual networks that do not inherit properties from an underlying
        physical world. Analysis of a random-graph model for Gnutella-like
        overlay-networks yields strong indications that such networks might be
        characterized as a sphere with fractal dimension}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03topology.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2003_2
@article{2003_2,
  title = {The evolution of altruistic punishment}, 
  author = {Robert Boyd and Herbert Gintis and Samuel Bowles and Peter J. Richerson}, 
  journal = {Proceedings of the National Academy of Sciences of the USA}, 
  volume = {100}, 
  year = {2003}, 
  month = {March}, 
  pages = {3531--3535}, 
  abstract = {Both laboratory and field data suggest that people punish noncooperators even
        in one-shot interactions. Although such {\textquotedblleft}altruistic
        punishment{\textquotedblright} may explain the high levels of cooperation in
        human societies, it creates an evolutionary puzzle: existing models suggest that
        altruistic cooperation among nonrelatives is evolutionarily stable only in small
        groups. Thus, applying such models to the evolution of altruistic punishment
        leads to the prediction that people will not incur costs to punish others to
        provide benefits to large groups of nonrelatives. However, here we show that an
        important asymmetry between altruistic cooperation and altruistic punishment
        allows altruistic punishment to evolve in populations engaged in one-time,
        anonymous interactions. This process allows both altruistic punishment and
        altruistic cooperation to be maintained even when groups are large and other
        parameter values approximate conditions that characterize cultural evolution in
        the small-scale societies in which humans lived for most of our prehistory}, 
  www_section = {altruistic cooperation, altruistic punishment, cooperation, human society,
        nonrelatives}, 
  doi = {10.1073/pnas.0630443100}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PNAS\%20\%282003\%29\%20-\%20The\%20evolution\%20of\%20altruistic\%20punishment.pdf},
}
2003_3
@article{2003_3,
  title = {Extremum Feedback with Partial Knowledge}, 
  author = {Thomas Fuhrmann and J{\"o}rg Widmer}, 
  journal = {unknown}, 
  volume = {2816}, 
  year = {2003}, 
  abstract = {A scalable feedback mechanism to solicit feedback from a potentially very
        large group of networked nodes is an important building block for many network
        protocols. Multicast transport protocols use it for negative acknowledgements and
        for delay and packet loss determination. Grid computing and peer-to-peer
        applications can use similar approaches to find nodes that are, at a given moment
        in time, best suited to serve a request. In sensor networks, such mechanisms
        allow to report extreme values in a resource efficient way. In this paper we
        analyze several extensions to the exponential feedback algorithm [5,6] that
        provide an optimal way to collect extreme values from a potentially very large
        group of networked nodes. In contrast to prior work, we focus on how knowledge
        about the value distribution in the group can be used to optimize the feedback
        process. We describe the trade-offs that have to be decided upon when using these
        extensions and provide additional insight into their performance by means of
        simulation. Furthermore, we briefly illustrate how sample applications can
        benefit from the proposed mechanisms}, 
  isbn = {978-3-540-20051-2}, 
  issn = {0302-9743}, 
  doi = {10.1007/b13249}, 
  url = {http://www.springerlink.com/content/bvelyaew4ukl4aau/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03feedback.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2003_4
@conference{2003_4,
  title = {A game theoretic framework for incentives in P2P systems}, 
  author = {Chiranjeeb Buragohain and Divyakant Agrawal and Subhash Suri}, 
  booktitle = {Proceedings of the 3rd International Conference on Peer-to-Peer Computing}, 
  organization = {IEEE Computer Society}, 
  year = {2003}, 
  month = {September}, 
  address = {Link{\"o}ping, Sweden}, 
  pages = {48--56}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Peer-to-peer (P2P) networks are self-organizing, distributed systems, with no
        centralized authority or infrastructure. Because of the voluntary participation,
        the availability of resources in a P2P system can be highly variable and
        unpredictable. We use ideas from game theory to study the interaction of
        strategic and rational peers, and propose a differential service-based incentive
        scheme to improve the system's performance}, 
  www_section = {network, P2P, peer-to-peer networking, system performance}, 
  isbn = {0-7695-2023-5}, 
  doi = {10.1109/PTP.2003.1231503}, 
  url = {http://dx.doi.org/10.1109/PTP.2003.1231503}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2703\%20-\%20Buragohain\%2C\%20Agrawal\%20\%26\%20Suri\%20-\%20Incentives\%20in\%20P2P\%20systems.pdf},
}
2003_5
@conference{2003_5,
  title = {Incentives build robustness in BitTorrent}, 
  author = {Bram Cohen}, 
  booktitle = {NetEcon'03--Proceedings of the Workshop on Economics of Peer-to-Peer
        Systems}, 
  year = {2003}, 
  month = {June}, 
  address = {Berkeley, CA, USA}, 
  abstract = {The BitTorrent file distribution system uses tit-for-tat as a method to
        seeking pareto efficiency. It achieves a higher level of robustness and resource
        utilization than any currently known cooperative technique. We explain what
        BitTorrent does, and how economic methods are used to achieve that goal}, 
  www_section = {BitTorrent, resource utilization, robustness}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2703\%20-\%20Cohen\%20-\%20Incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
}
2003_6
@conference{2003_6,
  title = {KARMA: a Secure Economic Framework for P2P Resource Sharing}, 
  author = {Vivek Vishnumurthy and Sangeeth Chandrakumar and Emin G{\"u}n Sirer}, 
  booktitle = {P2PECON'05. Proceedings of the 3rd Workshop on Economics of Peer-to-Peer
        Systems}, 
  year = {2003}, 
  month = {June}, 
  address = {Berkeley, CA, USA}, 
  abstract = {Peer-to-peer systems are typically designed around the assumption that all
        peers will willingly contribute resources to a global pool. They thus suffer from
        freeloaders, that is, participants who consume many more resources than they
        contribute. In this paper, we propose a general economic framework for avoiding
        freeloaders in peer-to-peer systems. Our system works by keeping track of the
        resource consumption and resource contribution of each participant. The overall
        standing of each}, 
  www_section = {economic framework, freeloader, karma, p2p resource sharing}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20KARMA.pdf},
}
2003_7
@book{2003_7,
  title = {Koorde: A Simple degree-optimal distributed hash table}, 
  author = {M. Frans Kaashoek and David Karger}, 
  booktitle = {Peer-to-Peer Systems II}, 
  organization = {Springer}, 
  volume = {2735/2003}, 
  year = {2003}, 
  address = {Berlin / Heidelberg}, 
  pages = {98--107}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Koorde is a new distributed hash table (DHT) based on Chord 15 and the de
        Bruijn graphs 2. While inheriting the simplicity of Chord, Koorde meets various
        lower bounds, such as O(log n) hops per lookup request with only 2 neighbors per
        node (where n is the number of nodes in the DHT), and O(log n/log log n) hops per
        lookup request with O(log n) neighbors per node}, 
  www_section = {de Bruijn graph, distributed hash table, Koorde}, 
  doi = {10.1007/b11823}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koorde.pdf}, 
}
2003_8
@conference{2003_8,
  title = {Revealing Information While Preserving Privacy}, 
  author = {Dinur, Irit and Nissim, Kobbi}, 
  booktitle = {Proceedings of the Twenty-second ACM SIGMOD-SIGACT-SIGART Symposium on
        Principles of Database Systems}, 
  organization = {ACM}, 
  year = {2003}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {We examine the tradeoff between privacy and usability of statistical
        databases. We model a statistical database by an n-bit string d1 ,.., dn , with a
        query being a subset q $\subseteq$ [n] to be answered by summation of values which belong
        to q. Our main result is a polynomial reconstruction algorithm of data from noisy
        (perturbed) subset sums. Applying this reconstruction algorithm to statistical
        databases we show that in order to achieve privacy one has to add perturbation of
        magnitude $\Omega(\sqrt{n})$. That is, smaller perturbation always results in a strong
        violation of privacy. We show that this result is tight by exemplifying access
        algorithms for statistical databases that preserve privacy while adding
        perturbation of magnitude $O(\sqrt{n})$. For time-T bounded adversaries we demonstrate
        a privacy-preserving access algorithm whose perturbation magnitude is $\approx \sqrt{T}$}, 
  www_section = {data reconstruction, integrity and security, subset-sums with noise}, 
  isbn = {1-58113-670-6}, 
  doi = {10.1145/773153.773173}, 
  url = {http://doi.acm.org/10.1145/773153.773173}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RevelaingInformation2003Dinur.pdf},
}
2003_9
@conference{2003_9,
  title = {Scalable Application-level Anycast for Highly Dynamic Groups}, 
  author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron}, 
  booktitle = {NGC'03 Networked Group Communication, Fifth International COST264 Workshop}, 
  organization = {Springer}, 
  volume = {2816}, 
  year = {2003}, 
  month = {September}, 
  address = {Munich, Germany}, 
  pages = {47--57}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We present an application-level implementation of anycast for highly dynamic
        groups. The implementation can handle group sizes varying from one to the whole
        Internet, and membership maintenance is efficient enough to allow members to join
        for the purpose of receiving a single message. Key to this efficiency is the use
        of a proximity-aware peer-to-peer overlay network for decentralized, lightweight
        group maintenance; nodes join the overlay once and can join and leave many groups
        many times to amortize the cost of maintaining the overlay. An anycast
        implementation with these properties provides a key building block for
        distributed applications. In particular, it enables management and location of
        dynamic resources in large scale peer-to-peer systems. We present several
        resource management applications that are enabled by our implementation}, 
  www_section = {anycast, application-level, highly dynamic groups, peer-to-peer
        networking}, 
  doi = {10.1007/978-3-540-39405-1_5}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NGC\%2703\%20-\%20Scalable\%20Application-level\%20Anycast\%20.pdf},
}
2004.Pang.imc.dns
@conference{2004.Pang.imc.dns,
  title = {Availability, Usage, and Deployment Characteristics of the Domain Name System}, 
  author = {Jeffrey Pang and James Hendricks and Aditya Akella and Bruce Maggs and Roberto
        De Prisco and Seshan, Srinivasan}, 
  booktitle = {IMC'04--Proceedings of the 4th ACM SIGCOMM Conference on Internet
        Measurement}, 
  organization = {ACM}, 
  year = {2004}, 
  month = {October}, 
  address = {Taormina, Sicily, Italy}, 
  publisher = {ACM}, 
  abstract = {The Domain Name System (DNS) is a critical part of the Internet's
        infrastructure, and is one of the few examples of a robust, highly-scalable, and
        operational distributed system. Although a few studies have been devoted to
        characterizing its properties, such as its workload and the stability of the
        top-level servers, many key components of DNS have not yet been examined. Based
        on large-scale measurements taken from servers in a large content distribution
        network, we present a detailed study of key characteristics of the DNS
        infrastructure, such as load distribution, availability, and deployment patterns
        of DNS servers. Our analysis includes both local DNS servers and servers in the
        authoritative hierarchy. We find that (1) the vast majority of users use a small
        fraction of deployed name servers, (2) the availability of most name servers is
        high, and (3) there exists a larger degree of diversity in local DNS server
        deployment and usage than for authoritative servers. Furthermore, we use our DNS
        measurements to draw conclusions about federated infrastructures in general. We
        evaluate and discuss the impact of federated deployment models on future systems,
        such as Distributed Hash Tables}, 
  www_section = {availability, DNS, federated}, 
  isbn = {1-58113-821-0}, 
  doi = {10.1145/1028788.1028790}, 
  url = {http://doi.acm.org/10.1145/1028788.1028790}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2704\%20-\%20Availability\%2C\%20Usage\%2C\%20and\%20Deployment\%20Characteristics\%20of\%20the\%20DNS.pdf},
}
2004_0
@booklet{2004_0,
  title = {Apres-a system for anonymous presence}, 
  author = {Laurie, Ben}, 
  year = {2004}, 
  abstract = {If Alice wants to know when Bob is online, and they don't want anyone else to
        know their interest in each other, what do they do? Once they know they are both
        online, they would like to be able to exchange messages, send files, make phone
        calls to each other, and so forth, all without anyone except them knowing they
        are doing this. Apres is a system that attempts to make this possible}, 
  www_section = {anonymous presence, presence}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/apres.pdf}, 
}
2004_1
@article{2004_1,
  title = {A construction of locality-aware overlay network: mOverlay and its performance}, 
  author = {Xin Yan Zhang and Qian Zhang and Zhang, Zhensheng and Gang Song and Wenwu Zhu}, 
  journal = {IEEE Journal on Selected Areas in Communications}, 
  volume = {22}, 
  year = {2004}, 
  month = {January}, 
  pages = {18--28}, 
  abstract = {There are many research interests in peer-to-peer (P2P) overlay
        architectures. Most widely used unstructured P2P networks rely on central
        directory servers or massive message flooding, clearly not scalable. Structured
        overlay networks based on distributed hash tables (DHT) are expected to eliminate
        flooding and central servers, but can require many long-haul message deliveries.
        An important aspect of constructing an efficient overlay network is how to
        exploit network locality in the underlying network. We propose a novel mechanism,
        mOverlay, for constructing an overlay network that takes account of the locality
        of network hosts. The constructed overlay network can significantly decrease the
        communication cost between end hosts by ensuring that a message reaches its
        destination with small overhead and very efficient forwarding. To construct the
        locality-aware overlay network, dynamic landmark technology is introduced. We
        present an effective locating algorithm for a new host joining the overlay
        network. We then present a theoretical analysis and simulation results to
        evaluate the network performance. Our analysis shows that the overhead of our
        locating algorithm is O(logN), where N is the number of overlay network hosts.
        Our simulation results show that the average distance between a pair of hosts in
        the constructed overlay network is only about 11\% of the one in a traditional,
        randomly connected overlay network. Network design guidelines are also provided.
        Many large-scale network applications, such as media streaming, application-level
        multicasting, and media distribution, can leverage mOverlay to enhance their
        performance}, 
  www_section = {distributed hash table, flooding attacks, overlay networks, P2P}, 
  url = {http://kmweb.twbbs.org/drupal/node/13}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/6-914.ppt}, 
}
2004_10
@article{2004_10,
  title = {Personalized Web search for improving retrieval effectiveness}, 
  author = {Fang Liu and Yu, C. and Weiyi Meng}, 
  journal = {Knowledge and Data Engineering, IEEE Transactions on}, 
  volume = {16}, 
  year = {2004}, 
  month = {January}, 
  pages = {28--40}, 
  abstract = {Current Web search engines are built to serve all users, independent of the
        special needs of any individual user. Personalization of Web search is to carry
        out retrieval for each user incorporating his/her interests. We propose a novel
        technique to learn user profiles from users' search histories. The user profiles
        are then used to improve retrieval effectiveness in Web search. A user profile
        and a general profile are learned from the user's search history and a category
        hierarchy, respectively. These two profiles are combined to map a user query into
        a set of categories which represent the user's search intention and serve as a
        context to disambiguate the words in the user's query. Web search is conducted
        based on both the user query and the set of categories. Several profile learning
        and category mapping algorithms and a fusion algorithm are provided and
        evaluated. Experimental results indicate that our technique to personalize Web
        search is both effective and efficient}, 
  www_section = {BANDWIDTH, category hierarchy, category mapping algorithms, Displays,
        fusion algorithm, History, human factors, information filtering, information
        retrieval, libraries, personalized Web search, profile learning, retrieval
        effectiveness, search engines, search intention, special needs, user interfaces,
        user profiles, user search histories, Web search, Web search engines}, 
  issn = {1041-4347}, 
  doi = {10.1109/TKDE.2004.1264820}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedWebSearch2004Liu.pdf},
}
2004_11
@booklet{2004_11,
  title = {POSIX--Portable Operating System Interface}, 
  author = {{The Open Group} and {IEEE}}, 
  howpublished = {The Open Group Technical Standard Base Specifications, Issue 6}, 
  number = {IEEE Std 1003.n}, 
  year = {2004}, 
  www_section = {API, asynchronous, built-in utility, CPU, file access control mechanism,
        input/output (I/O), job control, network, portable operating system interface
        (POSIX), shell, stream, synchronous}, 
  url = {http://pubs.opengroup.org/onlinepubs/009695399/}, 
}
2004_12
@conference{2004_12,
  title = {A Probabilistic Approach to Predict Peers' Performance in P2P Networks}, 
  author = {Zoran Despotovic and Karl Aberer}, 
  booktitle = {CIA 2004. Cooperative Information Agents VIII, 8th International Workshop}, 
  organization = {Springer}, 
  volume = {3191}, 
  year = {2004}, 
  month = {September}, 
  address = {Erfurt, Germany}, 
  pages = {62--76}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {The problem of encouraging trustworthy behavior in P2P online communities by
        managing peers' reputations has drawn a lot of attention recently. However, most
        of the proposed solutions exhibit the following two problems: huge implementation
        overhead and unclear trust related model semantics. In this paper we show that a
        simple probabilistic technique, maximum likelihood estimation namely, can reduce
        these two problems substantially when employed as the feedback aggregation
        strategy. Thus, no complex exploration of the feedback is necessary. Instead,
        simple, intuitive and efficient probabilistic estimation methods suffice}, 
  www_section = {p2p network, peer performance}, 
  doi = {10.1007/978-3-540-30104-2_6}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CIA\%2704\%20-\%20Despotovic\%20\%26\%20Aberer\%20-\%20Peers\%27\%20performance\%20in\%20P2P\%20networks.pdf},
}
2004_13
% NOTE(review): journal = {unknown} is a generator placeholder; this appears to be an
% unpublished manuscript / technical report -- verify venue and retype if confirmed.
@article{2004_13,
  title = {Scalable byzantine agreement}, 
  author = {Lewis, Scott and Saia, Jared}, 
  journal = {unknown}, 
  year = {2004}, 
  abstract = {This paper gives a scalable protocol for solving the Byzantine agreement
        problem. The protocol is scalable in the sense that for Byzantine agreement over
        n processors, each processor sends and receives only O(log n) messages in
        expectation. To the best of our knowledge this is the first result for the
        Byzantine agreement problem where each processor sends and receives o(n)
        messages. The protocol uses randomness and is correct with high probability. It
        can tolerate any fraction of faulty processors which is strictly less than 1/6.
        Our result partially answers the following question posed by Kenneth Birman:
        {\textquotedblleft}How scalable are the traditional solutions to problems such as
        Consensus or Byzantine Agreement?{\textquotedblright} [5]}, 
  www_section = {byzantine agreement}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sba.pdf}, 
}
2004_14
@phdthesis{2004_14,
  title = {Signaling and Networking in Unstructured Peer-to-Peer Networks}, 
  author = {R{\"u}diger Schollmeier}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2004}, 
  month = sep, 
  address = {Munich, Germany}, 
  pages = {0--177}, 
  type = {Dissertation}, 
  abstract = {This work deals with the efficiency of Peer-to-Peer (P2P) networks, which are
        distributed and self-organizing overlay networks. We contribute to their
        understanding and design by using new measurement techniques, simulations and
        analytical methods. In this context we first present measurement methods and
        results of P2P networks concerning traffic and topology characteristics as well
        as concerning user behavior. Based on these results we develop stochastic models
        to describe the user behavior, the traffic and the topology of P2P networks
        analytically. Using the results of our measurements and analytical
        investigations, we develop new P2P architectures to improve the efficiency of P2P
        networks concerning their topology and their signaling traffic. Finally we verify
        our results for the new architectures by measurements as well as computer-based
        simulations on different levels of detail}, 
  www_section = {application model, communication network, compression, content
        availability, cross layer communication, generating functions, overlay networks,
        random graph theory, self-organization, signaling traffic, simulation, topology
        measurement, traffic measurement, user model}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Schollmeier\%20-\%20Signaling\%20and\%20networking\%20in\%20unstructured\%20p2p\%20networks.pdf},
}
2004_2
@phdthesis{2004_2,
  title = {The Decentralised Coordination of Self-Adaptive Components for Autonomic
        Distributed Systems}, 
  author = {Jim Dowling}, 
  school = {University of Dublin}, 
  type = {Doctor of Philosophy}, 
  year = {2004}, 
  month = oct, 
  address = {Dublin, Ireland}, 
  pages = {0--214}, 
  www_section = {autonomic distributed system, descentralised coordination}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20Autonomic\%20distributed\%20systems.pdf},
}
2004_3
@conference{2004_3,
  title = {Designing Incentive mechanisms for peer-to-peer systems}, 
  author = {John Chuang}, 
  booktitle = {GECON 2004. 1st IEEE International Workshop on Grid Economics and Business
        Models}, 
  organization = {IEEE Computer Society}, 
  year = {2004}, 
  month = apr, 
  address = {Seoul, South Korea}, 
  pages = {67--81}, 
  publisher = {IEEE Computer Society}, 
  abstract = {From file-sharing to mobile ad-hoc networks, community networking to
        application layer overlays, the peer-to-peer networking paradigm promises to
        revolutionize the way we design, build and use the communications network of
        tomorrow, transform the structure of the communications industry, and challenge
        our understanding of markets and democracies in a digital age. The fundamental
        premise of peer-to-peer systems is that individual peers voluntarily contribute
        resources to the system. We discuss some of the research opportunities and
        challenges in the design of incentive mechanisms for P2P systems}, 
  www_section = {incentives, P2P, peer-to-peer networking}, 
  isbn = {0-7803-8525-X}, 
  doi = {10.1109/GECON.2004.1317584}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GECON\%2704\%20-\%20Designing\%20incentive\%20mechanisms\%20for\%20p2p\%20systems.pdf},
}
2004_4
@incollection{2004_4,
  title = {Efficient Private Matching and Set Intersection}, 
  author = {Freedman, Michael J. and Nissim, Kobbi and Pinkas, Benny}, 
  booktitle = {Advances in Cryptology--EUROCRYPT 2004}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {3027}, 
  year = {2004}, 
  pages = {1--19}, 
  editor = {Cachin, Christian and Camenisch, Jan L.}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We consider the problem of computing the intersection of private datasets of
        two parties, where the datasets contain lists of elements taken from a large
        domain. This problem has many applications for online collaboration. We present
        protocols, based on the use of homomorphic encryption and balanced hashing, for
        both semi-honest and malicious environments. For lists of length k, we obtain
        O(k) communication overhead and O(k ln ln k) computation. The protocol for the
        semi-honest environment is secure in the standard model, while the protocol for
        the malicious environment is secure in the random oracle model. We also consider
        the problem of approximating the size of the intersection, show a linear
        lower-bound for the communication overhead of solving this problem, and provide a
        suitable secure protocol. Lastly, we investigate other variants of the matching
        problem, including extending the protocol to the multi-party setting as well as
        considering the problem of approximate matching}, 
  isbn = {978-3-540-21935-4}, 
  doi = {10.1007/978-3-540-24676-3_1}, 
  url = {http://dx.doi.org/10.1007/978-3-540-24676-3_1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EffecitvePrivateMatching2004Freedman.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
2004_5
@article{2004_5,
  title = {Enhancing Web privacy and anonymity in the digital era}, 
  author = {Stefanos Gritzalis}, 
  journal = {Information Management \& Computer Security}, 
  volume = {12}, 
  year = {2004}, 
  month = jan, 
  pages = {255--287}, 
  type = {survey}, 
  abstract = {This paper presents a state-of-the-art review of the Web privacy and
        anonymity enhancing security mechanisms, tools, applications and services, with
        respect to their architecture, operational principles and vulnerabilities.
        Furthermore, to facilitate a detailed comparative analysis, the appropriate
        parameters have been selected and grouped in classes of comparison criteria, in
        the form of an integrated comparison framework. The main concern during the
        design of this framework was to cover the confronted security threats, applied
        technological issues and users' demands satisfaction. GNUnet's Anonymity Protocol
        (GAP), Freedom, Hordes, Crowds, Onion Routing, Platform for Privacy Preferences
        (P3P), TRUSTe, Lucent Personalized Web Assistant (LPWA), and Anonymizer have been
        reviewed and compared. The comparative review has clearly highlighted that the
        pros and cons of each system do not coincide, mainly due to the fact that each
        one exhibits different design goals and thus adopts dissimilar techniques for
        protecting privacy and anonymity}, 
  www_section = {anonymity, GNUnet, onion routing}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p255.pdf}, 
}
2004_6
@incollection{2004_6,
  title = {Group Spreading: A Protocol for Provably Secure Distributed Name Service}, 
  author = {Awerbuch, Baruch and Scheideler, Christian}, 
  booktitle = {Automata, Languages and Programming}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {3142}, 
  year = {2004}, 
  pages = {183--195}, 
  editor = {D{\'\i}az, Josep and Karhum{\"a}ki, Juhani and Lepist{\"o}, Arto and Sannella,
        Donald}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  isbn = {978-3-540-22849-3}, 
  doi = {10.1007/978-3-540-27836-8_18}, 
  url = {http://dx.doi.org/10.1007/978-3-540-27836-8_18}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p_icalp04_0.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
2004_7
@book{2004_7,
  title = {An Introduction to Auction Theory}, 
  author = {Flavio M. Menezes and Paulo K. Monteiro}, 
  organization = {Oxford University Press}, 
  year = {2004}, 
  edition = {First}, 
  pages = {0--199}, 
  publisher = {Oxford University Press}, 
  abstract = {This book presents an in-depth discussion of the auction theory. It
        introduces the concept of Bayesian Nash equilibrium and the idea of studying
        auctions as games. Private, common, and affiliated values models and multi-object
        auction models are described. A general version of the Revenue Equivalence
        Theorem is derived and the optimal auction is characterized to relate the field
        of mechanism design to auction theory}, 
  www_section = {affiliated values model, auction theory, Bayesian Nash equilibrium, common
        values model, multiple objects, private values model, Revenue Equivalence
        Theorem}, 
  isbn = {9780199275984}, 
  doi = {10.1093/019927598X.001.0001}, 
  url = {http://www.oxfordscholarship.com/view/10.1093/019927598X.001.0001/acprof-9780199275984},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Menezes\%20\%26\%20Monteiro\%20-\%20An\%20Introduction\%20to\%20Auction\%20Theory.pdf},
}
2004_8
% NOTE(review): author was {unknown} while the paper's authors were listed under
% editor; moved them to author in "Last, First" form -- verify against the PDF.
@conference{2004_8,
  title = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks}, 
  author = {S{\"o}zer, Hasan and Kekkalmaz, Metin and K{\"o}rpeoglu, Ibrahim}, 
  booktitle = {A Peer-to-Peer File Sharing System for Wireless Ad-Hoc Networks}, 
  year = {2004}, 
  abstract = {File sharing in wireless ad-hoc networks in a peer to peer manner imposes
        many challenges that make conventional peer-to-peer systems operating on
        wire-line networks inapplicable for this case. Information and workload
        distribution as well as routing are major problems for members of a wireless
        ad-hoc network, which are only aware of their neighborhood. In this paper we
        propose a system that solves peer-to-peer filesharing problem for wireless ad-hoc
        networks. Our system works according to peer-to-peer principles, without
        requiring a central server, and distributes information regarding the location of
        shared files among members of the network. By means of a
        {\textquotedblleft}hashline{\textquotedblright} and forming a tree-structure
        based on the topology of the network, the system is able to answer location
        queries, and also discover and maintain routing information that is used to
        transfer files from a source-peer to another peer}, 
  www_section = {ad-hoc networks, file systems, P2P}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.124.9928.pdf}, 
}
2004_9
@article{2004_9,
  title = {Peer-to-Peer Networking \& -Computing}, 
  author = {Ralf Steinmetz and Klaus Wehrle}, 
  journal = {Informatik Spektrum}, 
  volume = {27}, 
  year = {2004}, 
  month = feb, 
  pages = {51--54}, 
  abstract = {Unter dem Begriff Peer-to-Peer etabliert sich ein h{\"o}chst interessantes
        Paradigma f{\"u}r die Kommunikation im Internet. Obwohl urspr{\"u}nglich nur
        f{\"u}r die sehr pragmatischen und rechtlich umstrittenen Dateitauschb{\"o}rsen
        entworfen, k{\"o}nnen die Peerto-Peer-Mechanismen zur verteilten Nutzung
        unterschiedlichster Betriebsmittel genutzt werden und neue M{\"o}glichkeiten
        f{\"u}r Internetbasierte Anwendungen er{\"o}ffnen}, 
  www_section = {computing, networking, peer-to-peer networking}, 
  doi = {10.1007/s00287-003-0362-9}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Informatik\%20Spektrum\%20-\%20Peer-to-peer\%20networking\%20\%26\%20-computing.pdf},
}
2005_0
@conference{2005_0,
  title = {Chainsaw: Eliminating Trees from Overlay Multicast}, 
  author = {Vinay Pai and Kapil Kumar and Karthik Tamilmani and Vinay Sambamurthy and
        Alexander E. Mohr}, 
  booktitle = {4th International Workshop}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3640}, 
  year = {2005}, 
  month = nov, 
  address = {Ithaca, NY, USA}, 
  pages = {127--140}, 
  editor = {Miguel Castro and Robbert Van Renesse}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {Lecture Notes in Computer Science (Peer-to-peer Systems IV)}, 
  abstract = {In this paper, we present Chainsaw, a p2p overlay multicast system that
        completely eliminates trees. Peers are notified of new packets by their neighbors
        and must explicitly request a packet from a neighbor in order to receive it. This
        way, duplicate data can be eliminated and a peer can ensure it receives all
        packets. We show with simulations that Chainsaw has a short startup time, good
        resilience to catastrophic failure and essentially no packet loss. We support
        this argument with real-world experiments on Planetlab and compare Chainsaw to
        Bullet and Splitstream using MACEDON}, 
  www_section = {chainsaw, p2p overlay multicast system, packet loss, trees}, 
  isbn = {978-3-540-29068-1}, 
  issn = {1611-3349}, 
  doi = {10.1007/11558989}, 
  url = {http://www.springerlink.com/content/l13550223q12l65v/about/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chainsaw.pdf}, 
}
2005_1
@article{2005_1,
  title = {Cooperation among strangers with limited information about reputation}, 
  author = {Gary E. Bolton and Elena Katok and Axel Ockenfels}, 
  journal = {Journal of Public Economics}, 
  volume = {89}, 
  year = {2005}, 
  month = aug, 
  pages = {1457--1468}, 
  abstract = {The amount of institutional intervention necessary to secure
        efficiency-enhancing cooperation in markets and organizations, in circumstances
        where interactions take place among essentially strangers, depends critically on
        the amount of information informal reputation mechanisms need transmit. Models
        based on subgame perfection find that the information necessary to support
        cooperation is recursive in nature and thus information generating and processing
        requirements are quite demanding. Models that do not rely on subgame perfection,
        on the other hand, suggest that the information demands may be quite modest. The
        experiment we present indicates that even without any reputation information
        there is a non-negligible amount of cooperation that is, however, quite sensitive
        to the cooperation costs. For high costs, providing information about a partner's
        immediate past action increases cooperation. Recursive information about the
        partners' previous partners' reputation further promotes cooperation, regardless
        of the cooperation costs}, 
  www_section = {cooperation, experimental economics, reputation}, 
  doi = {10.1016/j.jpubeco.2004.03.008}, 
  url = {http://dx.doi.org/10.1016/j.jpubeco.2004.03.008}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Public\%20Economics\%20-\%20Bolton\%2C\%20Katok\%20\%26\%20Ockenfels.pdf},
}
2005_10
@article{2005_10,
  title = {Privacy Practices of Internet Users: Self-reports Versus Observed Behavior}, 
  author = {Jensen, Carlos and Potts, Colin and Jensen, Christian}, 
  journal = {International Journal of Human-Computer Studies}, 
  volume = {63}, 
  year = {2005}, 
  pages = {203--227}, 
  abstract = {Several recent surveys conclude that people are concerned about privacy and
        consider it to be an important factor in their online decision making. This paper
        reports on a study in which (1) user concerns were analysed more deeply and (2)
        what users said was contrasted with what they did in an experimental e-commerce
        scenario. Eleven independent variables were shown to affect the online behavior
        of at least some groups of users. Most significant were trust marks present on
        web pages and the existence of a privacy policy, though users seldom consulted
        the policy when one existed. We also find that many users have inaccurate
        perceptions of their own knowledge about privacy technology and vulnerabilities,
        and that important user groups, like those similar to the Westin "privacy
        fundamentalists", do not appear to form a cohesive group for privacy-related
        decision making.In this study we adopt an experimental economic research
        paradigm, a method for examining user behavior which challenges the current
        emphasis on survey data. We discuss these issues and the implications of our
        results on user interpretation of trust marks and interaction design. Although
        broad policy implications are beyond the scope of this paper, we conclude by
        questioning the application of the ethical/legal doctrine of informed consent to
        online transactions in the light of the evidence that users frequently do not
        consult privacy policies}, 
  www_section = {decision-making, design, e-commerce, economic models, policy, privacy,
        survey}, 
  issn = {1071-5819}, 
  doi = {10.1016/j.ijhcs.2005.04.019}, 
  url = {http://dx.doi.org/10.1016/j.ijhcs.2005.04.019}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPractices2005Jensen.pdf},
}
2005_11
@incollection{2005_11,
  title = {Privacy-Preserving Set Operations}, 
  author = {Kissner, Lea and Song, Dawn}, 
  booktitle = {Advances in Cryptology -- CRYPTO 2005}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {3621}, 
  year = {2005}, 
  pages = {241--257}, 
  editor = {Shoup, Victor}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In many important applications, a collection of mutually distrustful parties
        must perform private computation over multisets. Each party's input to the
        function is his private input multiset. In order to protect these private sets,
        the players perform privacy-preserving computation; that is, no party learns more
        information about other parties' private input sets than what can be deduced from
        the result. In this paper, we propose efficient techniques for privacy-preserving
        operations on multisets. By building a framework of multiset operations,
        employing the mathematical properties of polynomials, we design efficient,
        secure, and composable methods to enable privacy-preserving computation of the
        union, intersection, and element reduction operations. We apply these techniques
        to a wide range of practical problems, achieving more efficient results than
        those of previous work}, 
  isbn = {978-3-540-28114-6}, 
  doi = {10.1007/11535218_15}, 
  url = {http://dx.doi.org/10.1007/11535218_15}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreservingSetOperations2005Kissner.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
2005_12
@incollection{2005_12,
  title = {On Private Scalar Product Computation for Privacy-Preserving Data Mining}, 
  author = {Goethals, Bart and Laur, Sven and Lipmaa, Helger and Mielik{\"a}inen, Taneli}, 
  booktitle = {Information Security and Cryptology -- ICISC 2004}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {3506}, 
  year = {2005}, 
  pages = {104--120}, 
  editor = {Park, Choon-sik and Chee, Seongtaek}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In mining and integrating data from multiple sources, there are many privacy
        and security issues. In several different contexts, the security of the full
        privacy-preserving data mining protocol depends on the security of the underlying
        private scalar product protocol. We show that two of the private scalar product
        protocols, one of which was proposed in a leading data mining conference, are
        insecure. We then describe a provably private scalar product protocol that is
        based on homomorphic encryption and improve its efficiency so that it can also be
        used on massive datasets}, 
  www_section = {Privacy-preserving data mining, private scalar product protocol,
        vertically partitioned frequent pattern mining}, 
  isbn = {978-3-540-26226-8}, 
  doi = {10.1007/11496618_9}, 
  url = {http://dx.doi.org/10.1007/11496618_9}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateScalarProduct2004Goethals.pdf},
}
2005_13
@techreport{2005_13,
  title = {A Quick Introduction to Bloom Filters}, 
  author = {Christian Grothoff}, 
  institution = {The GNUnet Project}, 
  year = {2005}, 
  month = aug, 
  www_section = {Bloom filter, GNUnet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bloomfilter.pdf}, 
}
2005_2
@conference{2005_2,
  title = {Correctness of a gossip based membership protocol}, 
  author = {Andre Allavena and Alan Demers and John E. Hopcroft}, 
  booktitle = {PODC'05}, 
  organization = {ACM}, 
  year = {2005}, 
  publisher = {ACM}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossip-podc05.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
2005_3
@incollection{2005_3,
  title = {Distributed Hash Tables}, 
  author = {Wehrle, Klaus and G{\"o}tz, Stefan and Rieche, Simon}, 
  booktitle = {Peer-to-Peer Systems and Applications}, 
  organization = {Springer}, 
  volume = {3485}, 
  year = {2005}, 
  chapter = {7}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In the last few years, an increasing number of massively distributed systems
        with millions of participants has emerged within very short time frames.
        Applications, such as instant messaging, file-sharing, and content distribution
        have attracted countless numbers of users. For example, Skype gained more than
        2.5 millions of users within twelve months, and more than 50\% of Internet
        traffic is originated by BitTorrent. These very large and still rapidly growing
        systems attest to a new era for the design and deployment of distributed systems.
        In particular, they reflect what the major challenges are today for designing and
        implementing distributed systems: scalability, flexibility, and instant
        deployment}, 
  www_section = {distributed hash table}, 
  doi = {10.1007/11530657_7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LNCS\%20-\%20Distributed\%20Hash\%20Tables.pdf},
}
2005_4
@conference{2005_4,
  title = {An empirical study of free-riding behavior in the maze p2p file-sharing system}, 
  author = {Yang, Mao and Zhang, Zheng and Li, Xiaoming and Dai, Yafei}, 
  booktitle = {Proceedings of the 4th international conference on Peer-to-Peer Systems}, 
  organization = {Springer-Verlag}, 
  year = {2005}, 
  address = {Berlin, Heidelberg}, 
  publisher = {Springer-Verlag}, 
  www_section = {free-riding, incentives, Sybil attack}, 
  isbn = {978-3-540-29068-1}, 
  doi = {10.1007/11558989_17}, 
  url = {http://dx.doi.org/10.1007/11558989_17}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/maze_freeride.pdf}, 
}
2005_5
% NOTE(review): booktitle and DOI (10.1109/ICDCS.2004.*) indicate ICDCS 2004
% (March 2004, Tokyo); year = {2005} may be an entry error -- verify.
@conference{2005_5,
  title = {Exchange-based incentive mechanisms for peer-to-peer file sharing}, 
  author = {Kostas G. Anagnostakis and Michael B. Greenwald}, 
  booktitle = {Proceedings of International Conference on Distributed Computing Systems
        2004}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = mar, 
  address = {Tokyo, Japan}, 
  pages = {524--533}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Performance of peer-to-peer resource sharing networks depends upon the level
        of cooperation of the participants. To date, cash-based systems have seemed too
        complex, while lighter-weight credit mechanisms have not provided strong
        incentives for cooperation. We propose exchange-based mechanisms that provide
        incentives for cooperation in peer-to-peer file sharing networks. Peers give
        higher service priority to requests from peers that can provide a simultaneous
        and symmetric service in return. We generalize this approach to n-way exchanges
        among rings of peers and present a search algorithm for locating such rings. We
        have used simulation to analyze the effect of exchanges on performance. Our
        results show that exchange-based mechanisms can provide strong incentives for
        sharing, offering significant improvements in service times for sharing users
        compared to free-riders, without the problems and complexity of cash- or
        credit-based systems}, 
  www_section = {exchange-based mechanism, peer-to-peer networking, sharing}, 
  isbn = {0-7695-2086-3}, 
  doi = {10.1109/ICDCS.2004.1281619}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2704.pdf}, 
}
2005_6
@conference{2005_6,
  title = {Fuzzy Identity-Based Encryption}, 
  author = {Amit Sahai and Waters, Brent}, 
  booktitle = {EUROCRYPT'05 Workshop on the Theory and Application of of Cryptographic
        Techniques}, 
  organization = {Springer}, 
  volume = {3494}, 
  year = {2005}, 
  month = may, 
  address = {Aarhus, Denmark}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We introduce a new type of Identity-Based Encryption (IBE) scheme that we
        call Fuzzy Identity-Based Encryption. In Fuzzy IBE we view an identity as set of
        descriptive attributes. A Fuzzy IBE scheme allows for a private key for an
        identity, $\omega$, to decrypt a ciphertext encrypted with an identity,
        $\omega'$, if and only if the identities $\omega$ and $\omega'$ are close to
        each other as measured by the
        {\textquotedblleft}set overlap{\textquotedblright} distance metric. A Fuzzy IBE
        scheme can be applied to enable encryption using biometric inputs as identities;
        the error-tolerance property of a Fuzzy IBE scheme is precisely what allows for
        the use of biometric identities, which inherently will have some noise each time
        they are sampled. Additionally, we show that Fuzzy-IBE can be used for a type of
        application that we term {\textquotedblleft}attribute-based
        encryption{\textquotedblright}. In this paper we present two constructions of
        Fuzzy IBE schemes. Our constructions can be viewed as an Identity-Based
        Encryption of a message under several attributes that compose a (fuzzy) identity.
        Our IBE schemes are both error-tolerant and secure against collusion attacks.
        Additionally, our basic construction does not use random oracles. We prove the
        security of our schemes under the Selective-ID security model}, 
  www_section = {Fuzzy IBE, IBE}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2705\%20-\%20Fuzzy\%20Identity-Based\%20Encryption.pdf},
}
2005_8
% NOTE(review): author = {unknown} is a generator placeholder; this is the
% SIGCOMM '05 OpenDHT paper -- look up and fill in the author list.
@conference{2005_8,
  title = {OpenDHT: a public DHT service and its uses}, 
  author = {unknown}, 
  booktitle = {Proceedings of the 2005 conference on Applications, technologies,
        architectures, and protocols for computer communications}, 
  organization = {ACM}, 
  year = {2005}, 
  address = {New York, NY, USA}, 
  pages = {73--84}, 
  publisher = {ACM}, 
  series = {SIGCOMM '05}, 
  www_section = {distributed hash table, openDHT, peer-to-peer, resource allocation}, 
  isbn = {1-59593-009-4}, 
  doi = {10.1145/1080091.1080102}, 
  url = {http://doi.acm.org/10.1145/1080091.1080102}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/openDHT.pdf}, 
}
2005_9
@article{2005_9,
  title = {P2P Contracts: a Framework for Resource and Service Exchange}, 
  author = {Dipak Ghosal and Benjamin K. Poon and Keith Kong}, 
  journal = {FGCS. Future Generations Computer Systems}, 
  volume = {21}, 
  year = {2005}, 
  month = mar, 
  pages = {333--347}, 
  abstract = {A crucial aspect of Peer-to-Peer (P2P) systems is that of providing
        incentives for users to contribute their resources to the system. Without such
        incentives, empirical data show that a majority of the participants act asfree
        riders. As a result, a substantial amount of resource goes untapped, and,
        frequently, P2P systems devolve into client-server systems with attendant issues
        of performance under high load. We propose to address the free rider problem by
        introducing the notion of a P2P contract. In it, peers are made aware of the
        benefits they receive from the system as a function of their contributions. In
        this paper, we first describe a utility-based framework to determine the
        components of the contract and formulate the associated resource allocation
        problem. We consider the resource allocation problem for a flash crowd scenario
        and show how the contract mechanism implemented using a centralized server can be
        used to quickly create pseudoservers that can serve out the requests. We then
        study a decentralized implementation of the P2P contract scheme in which each
        node implements the contract based on local demand. We show that in such a
        system, other than contributing storage and bandwidth to serve out requests, it
        is also important that peer nodes function as application-level routers to
        connect pools of available pseudoservers. We study the performance of the
        distributed implementation with respect to the various parameters including the
        terms of the contract and the triggers to create pseudoservers and routers}, 
  www_section = {contracts, framework, P2P, peer-to-peer networking, resource exchange,
        service exchange}, 
  issn = {0167-739X}, 
  doi = {10.1016/j.future.2004.04.013}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FGCS\%20-\%20P2P\%20Contracts\%3A\%20a\%20Framework\%20for\%20Resource\%20and\%20Service\%20Exchange.pdf},
}
2006_0
@mastersthesis{2006_0,
  title = {Access Control in Peer-to-Peer Storage Systems}, 
  author = {Erol Ko{\c c}}, 
  school = {Eidgen{\"o}ssische Technische Hochschule Z{\"u}rich (ETH)}, 
  volume = {Communication Systems}, 
  year = {2006}, 
  month = oct, 
  address = {Zurich, Switzerland}, 
  pages = {0--159}, 
  type = {Master's Thesis}, 
  www_section = {access control, peer-to-peer storage system}, 
  url = {http://webcache.googleusercontent.com/u/ethweb?oe=utf8\&GO.x=0\&GO.y=0\&hl=es\&q=cache:7sJLnyzj1TcJ:http://www.zisc.ethz.ch/events/ISC20067Slides/MA_Report_Erol_Koc.pdf+Erol+Ko\%C3\%A7\&ct=clnk},
}
2006_1
@inproceedings{2006_1,
  title = {Combating Hidden Action in Unstructured Peer-to-Peer Systems},
  author = {Qi Zhao and Jianzhong Zhang and Jingdong Xu},
  booktitle = {ChinaCom '06. First International Conference on Communications and
        Networking in China},
  organization = {IEEE Computer Society},
  year = {2006},
  month = oct,
  address = {Beijing, China},
  pages = {1--5},
  publisher = {IEEE Computer Society},
  abstract = {In unstructured peer-to-peer systems, cooperation by the intermediate peers
        are essential for the success of queries. However, intermediate peers may choose
        to forward packets at a low priority or not forward the packets at all, which is
        referred as peers' hidden action. Hidden action may lead to significant decrement
        of search efficiency. In contrast to building a global system with reputations or
        economics, we proposed MSSF, an improved search method, to help queries route
        around the peers with hidden action. MSSF does not need to check other peers'
        behavior. It automatically adapts to change query routes according to the
        previous query results. Simulation results show that MSSF is more robust than
        Gnutella flooding when peers with hidden action increase},
  www_section = {cooperation, hidden action, unstructured peer-to-peer system},
  isbn = {1-4244-0463-0},
  doi = {10.1109/CHINACOM.2006.344762},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChinaCom\%2706\%20-\%20Combating\%20hidden\%20action\%20in\%20unstructured\%20p2p\%20systems.pdf},
}
2006_10
@book{2006_10,
  title = {DNS-Based Service Discovery in Ad Hoc Networks: Evaluation and Improvements},
  author = {Celeste Campo and Carlos Garc{\'\i}a-Rubio},
  organization = {Springer Berlin / Heidelberg},
  volume = {Volume 4217/2006},
  year = {2006},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {In wireless networks, devices must be able to dynamically discover and share
        services in the environment. The problem of service discovery has attracted great
        research interest in the last years, particularly for ad hoc networks. Recently,
        the IETF has proposed the use of the DNS protocol for service discovery. For ad
        hoc networks, the IETF works in two proposals of distributed DNS, Multicast DNS
        and LLMNR, that can both be used for service discovery. In this paper we describe
        and compare through simulation the performance of service discovery based in
        these two proposals of distributed DNS. We also propose four simple improvements
        that reduce the traffic generated, and so the power consumption, especially of
        the most limited, battery powered, devices. We present simulation results that
        show the impact of our improvements in a typical scenario},
  www_section = {ad-hoc networks, DNS},
  isbn = {978-3-540-45174-7},
  doi = {10.1007/11872153},
  url = {http://www.springerlink.com/content/m8322m1006416270/},
}
2006_11
@inproceedings{2006_11,
  title = {Improving traffic locality in BitTorrent via biased neighbor selection},
  author = {Ruchir Bindal and Pei Cao and William Chan and Jan Medved and George Suwala and
        Tony Bates and Amy Zhang},
  booktitle = {Proceedings of the 26th IEEE International Conference on Distributed
        Computing Systems},
  organization = {IEEE Computer Society},
  year = {2006},
  month = jan,
  address = {Lisboa, Portugal},
  pages = {0--66},
  publisher = {IEEE Computer Society},
  abstract = {Peer-to-peer (P2P) applications such as BitTorrent ignore traffic costs at
        ISPs and generate a large amount of cross-ISP traffic. As a result, ISPs often
        throttle BitTorrent traffic to control the cost. In this paper, we examine a new
        approach to enhance BitTorrent traffic locality, biased neighbor selection, in
        which a peer chooses the majority, but not all, of its neighbors from peers
        within the same ISP. Using simulations, we show that biased neighbor selection
        maintains the nearly optimal performance of Bit- Torrent in a variety of
        environments, and fundamentally reduces the cross-ISP traffic by eliminating the
        traffic's linear growth with the number of peers. Key to its performance is the
        rarest first piece replication algorithm used by Bit- Torrent clients. Compared
        with existing locality-enhancing approaches such as bandwidth limiting, gateway
        peers, and caching, biased neighbor selection requires no dedicated servers and
        scales to a large number of BitTorrent networks},
  www_section = {BitTorrent, neighbor selection, peer-to-peer networking, performance,
        traffic locality},
  isbn = {0-7695-2540-7},
  issn = {1063-6927},
  doi = {10.1109/ICDCS.2006.48},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2706\%20-\%20Improving\%20traffic\%20locality\%20in\%20BitTorrent.pdf},
}
2006_12
% NOTE(review): www_section is missing (flagged by the generator below); topic is
% plausibly {Bloom filter} -- confirm against the site's topic taxonomy before filling in.
@book{2006_12,
  title = {Less Hashing, Same Performance: Building a Better Bloom Filter}, 
  author = {Kirsch, Adam and Mitzenmacher, Michael}, 
  booktitle = {Algorithms -- ESA 2006}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {4168}, 
  year = {2006}, 
  pages = {456--467}, 
  editor = {Azar, Yossi and Erlebach, Thomas}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {A standard technique from the hashing literature is to use two hash functions
        h1(x) and h2(x) to simulate additional hash functions of the form gi (x) = h1(x)
        + ih2(x). We demonstrate that this technique can be usefully applied to Bloom
        filters and related data structures. Specifically, only two hash functions are
        necessary to effectively implement a Bloom filter without any loss in the
        asymptotic false positive probability. This leads to less computation and
        potentially less need for randomness in practice}, 
  isbn = {978-3-540-38875-3}, 
  doi = {10.1007/11841036_42}, 
  url = {http://dx.doi.org/10.1007/11841036_42}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LessHashing2006Kirsch.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2006_13
@inproceedings{2006_13,
  title = {Our Data, Ourselves: Privacy via Distributed Noise Generation},
  author = {Dwork, Cynthia and Kenthapadi, Krishnaram and McSherry, Frank and Mironov, Ilya
        and Naor, Moni},
  booktitle = {Proceedings of the 24th Annual International Conference on The Theory and
        Applications of Cryptographic Techniques},
  organization = {Springer-Verlag},
  year = {2006},
  address = {Berlin, Heidelberg},
  publisher = {Springer-Verlag},
  abstract = {In this work we provide efficient distributed protocols for generating shares
        of random noise, secure against malicious participants. The purpose of the noise
        generation is to create a distributed implementation of the privacy-preserving
        statistical databases described in recent papers [14, 4, 13]. In these databases,
        privacy is obtained by perturbing the true answer to a database query by the
        addition of a small amount of Gaussian or exponentially distributed random noise.
        The computational power of even a simple form of these databases, when the query
        is just of the form sum over all rows 'i' in the database of a function  f
         applied to the data in row i, has been demonstrated in [4]. A distributed
        implementation eliminates the need for a trusted database administrator. The
        results for noise generation are of independent interest. The generation of
        Gaussian noise introduces a technique for distributing shares of many unbiased
        coins with fewer executions of verifiable secret sharing than would be needed
        using previous approaches (reduced by a factor of n). The generation of
        exponentially distributed noise uses two shallow circuits: one for generating
        many arbitrarily but identically biased coins at an amortized cost of two
        unbiased random bits apiece, independent of the bias, and the other to combine
        bits of appropriate biases to obtain an exponential distribution},
  isbn = {3-540-34546-9, 978-3-540-34546-6},
  doi = {10.1007/11761679_29},
  url = {http://dx.doi.org/10.1007/11761679_29},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OurData2006Dwork.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2006_14
@inproceedings{2006_14,
  title = {Peer to peer size estimation in large and dynamic networks: A comparative
        study},
  author = {Erwan Le Merrer and Anne-Marie Kermarrec and Massouli{\'e}, Laurent},
  booktitle = {HPDC'06--15th IEEE International Symposium on High Performance Distributed
        Computing},
  organization = {IEEE Computer Society},
  year = {2006},
  month = jun,
  address = {Paris, France},
  publisher = {IEEE Computer Society},
  abstract = {As the size of distributed systems keeps growing, the peer to peer
        communication paradigm has been identified as the key to scalability. Peer to
        peer overlay networks are characterized by their self-organizing capabilities,
        resilience to failure and fully decentralized control. In a peer to peer overlay,
        no entity has a global knowledge of the system. As much as this property is
        essential to ensure the scalability, monitoring the system under such
        circumstances is a complex task. Yet, estimating the size of the system is core
        functionality for many distributed applications to parameter setting or
        monitoring purposes. In this paper, we propose a comparative study between three
        algorithms that estimate in a fully decentralized way the size of a peer to peer
        overlay. Candidate approaches are generally applicable irrespective of the
        underlying structure of the peer to peer overlay. The paper reports the head to
        head comparison of estimation system size algorithms. The simulations have been
        conducted using the same simulation framework and inputs and highlight the
        differences in cost and accuracy of the estimation between the algorithms both in
        static and dynamic settings},
  www_section = {comparison, counting, network size estimation, peer to peer},
  isbn = {1-4244-0307-3},
  doi = {10.1109/HPDC.2006.1652131},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPDC\%2706\%20-\%20Peer\%20to\%20peer\%20size\%20estimation\%20in\%20large\%20and\%20dynamic\%20networks.pdf},
}
2006_15
@inproceedings{2006_15,
  title = {Privacy Preserving Nearest Neighbor Search},
  author = {Shaneck, M. and Yongdae Kim and Kumar, V.},
  booktitle = {Data Mining Workshops, 2006. ICDM Workshops 2006. Sixth IEEE International
        Conference on},
  year = {2006},
  month = dec,
  abstract = {Data mining is frequently obstructed by privacy concerns. In many cases data
        is distributed, and bringing the data together in one place for analysis is not
        possible due to privacy laws (e.g. HIPAA) or policies. Privacy preserving data
        mining techniques have been developed to address this issue by providing
        mechanisms to mine the data while giving certain privacy guarantees. In this work
        we address the issue of privacy preserving nearest neighbor search, which forms
        the kernel of many data mining applications. To this end, we present a novel
        algorithm based on secure multiparty computation primitives to compute the
        nearest neighbors of records in horizontally distributed data. We show how this
        algorithm can be used in three important data mining algorithms, namely LOF
        outlier detection, SNN clustering, and kNN classification},
  www_section = {Clustering algorithms, Computer science, Conferences, cryptography, Data
        mining, data privacy, distributed computing, Kernel, kNN classification, LOF
        outlier detection, Medical diagnostic imaging, multiparty computation primitives,
        nearest neighbor search, Nearest neighbor searches, pattern clustering, privacy
        preservation, SNN clustering},
  doi = {10.1109/ICDMW.2006.133},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2006Shaneck.pdf},
}
2006_16
@article{2006_16,
  title = {Reactive Clustering in MANETs},
  author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart},
  journal = {International Journal of Pervasive Computing and Communications},
  volume = {2},
  year = {2006},
  pages = {81--90},
  publisher = {unknown},
  abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been
        proposed in the literature. With only one exception so far (1), all these
        protocols are proactive, thus wasting bandwidth when their function is not
        currently needed. To reduce the signalling traffic load, reactive clustering may
        be employed. We have developed a clustering protocol named
        {\textquotedblleft}On-Demand Group Mobility-Based Clustering{\textquotedblright}
        (ODGMBC) (2), (3) which is reactive. Its goal is to build clusters as a basis for
        address autoconfiguration and hierarchical routing. In contrast to the protocol
        described in ref. (1), the design process especially addresses the notions of
        group mobility and of multi-hop clusters in a MANET. As a result, ODGMBC maps
        varying physical node groups onto logical clusters. In this paper, ODGMBC is
        described. It was implemented for the ad hoc network simulator GloMoSim (4) and
        evaluated using several performance indicators. Simulation results are promising
        and show that ODGMBC leads to stable clusters. This stability is advantageous for
        autoconfiguration and routing mechanisms to be employed in conjunction with the
        clustering algorithm},
  www_section = {mobile Ad-hoc networks, multi-hop networks},
  doi = {10.1108/17427370780000143},
  url = {http://www.emeraldinsight.com/journals.htm?articleid=1615724\&show=pdf},
}
2006_17
% NOTE(review): this is a chapter (pages 629--660) in an edited handbook; @incollection
% with an editor field would be the more specific entry type -- confirm how the
% bibliography generator maps types before changing.
@book{2006_17,
  title = {Reputation Mechanisms}, 
  author = {Chrysanthos Dellarocas}, 
  booktitle = {Handbook on Information Systems and Economics}, 
  organization = {Elsevier}, 
  year = {2006}, 
  pages = {629--660}, 
  publisher = {Elsevier}, 
  www_section = {online marketplace, reputation mechanism}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dellarocas\%20-\%20Reputation\%20Mechanisms.pdf},
}
2006_18
% NOTE(review): author is recorded as unknown; the SSR work cited in the abstract is by
% Thomas Fuhrmann, who is presumably the author here too -- confirm before filling in.
@inproceedings{2006_18,
  title = {Scalable Routing in Sensor Actuator Networks with Churn},
  author = {unknown},
  booktitle = {Sensor and Ad Hoc Communications and Networks, 2006. SECON '06. 2006 3rd
        Annual IEEE Communications Society on},
  year = {2006},
  month = sep,
  abstract = {Routing in wireless networks is inherently difficult since their network
        topologies are typically unstructured and unstable. Therefore, many routing
        protocols for ad-hoc networks and sensor networks revert to flooding to acquire
        routes to previously unknown destinations. However, such an approach does not
        scale to large networks, especially when nodes need to communicate with many
        different destinations. This paper advocates a novel approach, the scalable
        source routing (SSR) protocol. It combines overlay-like routing in a virtual
        network structure with source routing in the physical network structure. As a
        consequence, SSR can efficiently provide the routing semantics of a structured
        routing overlay, making it an efficient basis for the scalable implementation of
        fully decentralized applications. In T. Fuhrmann (2005) it has been demonstrated
        that SSR can almost entirely avoid flooding, thus leading to a both memory and
        message efficient routing mechanism for large unstructured networks. This paper
        extends SSR to unstable networks, i. e. networks with churn where nodes
        frequently join and leave, the latter potentially ungracefully},
  www_section = {ad-hoc networks, scalable source routing},
  isbn = {1-4244-0626-9},
  doi = {10.1109/SAHCN.2006.288406},
  url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4068086\%2F4068087\%2F04068105.pdf\%3Farnumber\%3D4068105\&authDecision=-203},
}
2006_2
@book{2006_2,
  title = {Combinatorial Auctions},
  author = {Peter Cramton and Yoav Shoham and Richard Steinberg},
  organization = {MIT Press},
  year = {2006},
  address = {Cambridge, MA},
  pages = {0--649},
  publisher = {MIT Press},
  abstract = {The study of combinatorial auctions -- auctions in which bidders can bid on
        combinations of items or "packages" -- draws on the disciplines of economics,
        operations research, and computer science. This landmark collection integrates
        these three perspectives, offering a state-of-the art survey of developments in
        combinatorial auction theory and practice by leaders in the field.Combinatorial
        auctions (CAs), by allowing bidders to express their preferences more fully, can
        lead to improved economic efficiency and greater auction revenues. However,
        challenges arise in both design and implementation. Combinatorial Auctions
        addresses each of these challenges. After describing and analyzing various CA
        mechanisms, the book addresses bidding languages and questions of efficiency.
        Possible strategies for solving the computationally intractable problem of how to
        compute the objective-maximizing allocation (known as the winner determination
        problem) are considered, as are questions of how to test alternative algorithms.
        The book discusses five important applications of CAs: spectrum auctions, airport
        takeoff and landing slots, procurement of freight transportation services, the
        London bus routes market, and industrial procurement. This unique collection
        makes recent work in CAs available to a broad audience of researchers and
        practitioners. The integration of work from the three disciplines underlying CAs,
        using a common language throughout, serves to advance the field in theory and
        practice},
  www_section = {combinatorial auctions, winner determination problem},
  isbn = {0262033429, 978-0262033428},
  url = {http://works.bepress.com/cramton/35},
}
2006_20
@article{2006_20,
  title = {A survey on networking games in telecommunications},
  author = {Eitan Altman and Thomas Boulogne and Rachid El-Azouzi and Tania Jim{\'e}nez and
        Laura Wynter},
  journal = {Computers \& Operations Research},
  volume = {33},
  year = {2006},
  month = feb,
  pages = {286--311},
  publisher = {Elsevier},
  abstract = {In this survey, we summarize different modeling and solution concepts of
        networking games, as well as a number of different applications in
        telecommunications that make use of or can make use of networking games. We
        identify some of the mathematical challenges and methodologies that are involved
        in these problems. We include here work that has relevance to networking games in
        telecommunications from other areas, in particular from transportation planning},
  www_section = {communication network, game theory},
  doi = {10.1016/j.cor.2004.06.005},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/netgames.pdf},
}
2006_21
@book{2006_21,
  title = {Unconditionally Secure Constant-Rounds Multi-party Computation for Equality,
        Comparison, Bits and Exponentiation},
  author = {Damg{\'a}rd, Ivan and Fitzi, Matthias and Kiltz, Eike and Nielsen, Jesper Buus
        and Toft, Tomas},
  booktitle = {Theory of Cryptography},
  organization = {Springer Berlin Heidelberg},
  volume = {3876},
  year = {2006},
  pages = {285--304},
  editor = {Halevi, Shai and Rabin, Tal},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {We show that if a set of players hold shares of a value a {\epsilon} Fp for
        some prime p (where the set of shares is written [a] p ), it is possible to
        compute, in constant rounds and with unconditional security, sharings of the bits
        of a, i.e., compute sharings [a0] p , ..., [al- 1] p such that l = {$\lceil \log_2 p \rceil$},
        a0,...,al--1 {\epsilon} {0,1} and a = summation of ai * 2^i where 0 <= i <= l- 1.
        Our protocol is secure against active adversaries and works for any linear secret
        sharing scheme with a multiplication protocol. The complexity of our protocol is
        O(llogl) invocations of the multiplication protocol for the underlying secret
        sharing scheme, carried out in O(1) rounds. This result immediately implies
        solutions to other long-standing open problems such as constant-rounds and
        unconditionally secure protocols for deciding whether a shared number is zero,
        comparing shared numbers, raising a shared number to a shared exponent and
        reducing a shared number modulo a shared modulus},
  isbn = {978-3-540-32731-8},
  doi = {10.1007/11681878_15},
  url = {http://dx.doi.org/10.1007/11681878_15},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecure2006Damgard.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2006_3
@book{2006_3,
  title = {Combining Virtual and Physical Structures for Self-organized Routing},
  author = {Thomas Fuhrmann},
  booktitle = {Self-Organizing Systems},
  volume = {Volume 4124/2006},
  year = {2006},
  publisher = {unknown},
  series = {Lecture Notes in Computer Science},
  abstract = {Our recently proposed scalable source routing (SSR) protocol combines source
        routing in the physical network with Chord-like routing in the virtual ring that
        is formed by the address space. Thereby, SSR provides self-organized routing in
        large unstructured networks of resource-limited devices. Its ability to quickly
        adapt to changes in the network topology makes it suitable not only for
        sensor-actuator networks but also for mobile ad-hoc networks. Moreover, SSR
        directly provides the key-based routing semantics, thereby making it an efficient
        basis for the scalable implementation of self-organizing, fully decentralized
        applications. In this paper we review SSR's self-organizing features and
        demonstrate how the combination of virtual and physical structures leads to
        emergence of stability and efficiency. In particular, we focus on SSR's
        resistance against node churn. Following the principle of combining virtual and
        physical structures, we propose an extension that stabilizes SSR in face of heavy
        node churn. Simulations demonstrate the effectiveness of this extension},
  www_section = {Chord, scalable source routing, self-organization},
  isbn = {978-3-540-37658-3},
  doi = {10.1007/11822035},
  url = {http://www.springerlink.com/content/4540535t4v2g2548/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Combining\%20Virtual\%20and\%20Physical\%20Structures\%20for\%20Self-organized\%20Routing_0.pdf},
}
2006_4
@article{2006_4,
  title = {Communication Networks On the fundamental communication abstraction supplied by
        P2P overlay networks},
  author = {Cramer, Curt and Thomas Fuhrmann},
  journal = {unknown},
  year = {2006},
  abstract = {The disruptive advent of peer-to-peer (P2P) file sharing in 2000 attracted
        significant interest. P2P networks have matured from their initial form,
        unstructured overlays, to structured overlays like distributed hash tables
        (DHTs), which are considered state-of-the-art. There are huge efforts to improve
        their performance. Various P2P applications like distributed storage and
        application-layer multicast were proposed. However, little effort was spent to
        understand the communication abstraction P2P overlays supply. Only when it is
        understood, the reach of P2P ideas will significantly broaden. Furthermore, this
        clarification reveals novel approaches and highlights future directions. In this
        paper, we reconsider well-known P2P overlays, linking them to insights from
        distributed systems research. We conclude that the main communication abstraction
        is that of a virtual address space or application-specific naming. On this basis,
        P2P systems build a functional layer implementing, for example lookup,
        indirection and distributed processing. Our insights led us to identify
        interesting and unexplored points in the design space},
  www_section = {distributed hash table, P2P},
  url = {http://www3.interscience.wiley.com/journal/109858517/abstract},
}
2006_5
@article{2006_5,
  title = {Complementary currency innovations: Self-guarantee in peer-to-peer currencies},
  author = {Mitra Ardron and Bernard Lietaer},
  journal = {International Journal of Community Currency Research},
  volume = {10},
  year = {2006},
  month = jan,
  pages = {1--7},
  abstract = {The WAT system, as used in Japan, allows for businesses to issue their own
        tickets (IOU's) which can circulate as a complementary currency within a
        community. This paper proposes a variation on that model, where the issuer of a
        ticket can offer a guarantee, in the form of some goods or services. The
        difference in value, along with a reasonable acceptance that the issuer is
        capable of delivering the service or goods, allows for a higher degree of
        confidence in the ticket, and therefore a greater liquidity},
  www_section = {guarantee, peer-to-peer currencies},
  issn = {1325-9547},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IJCCR\%20vol\%2010\%20\%282006\%29\%201\%20Ardron\%20and\%20Lietaer.pdf},
}
2006_6
@inproceedings{2006_6,
  title = {Curve25519: new Diffie-Hellman speed records},
  author = {Daniel J. Bernstein},
  booktitle = {PKC},
  year = {2006},
  month = feb,
  www_section = {Curve25519, ECC, ECDH, GNUnet},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/curve25519-20060209.pdf},
}
2006_7
@book{2006_7,
  title = {Designing Economics Mechanisms},
  author = {Leonid Hurwicz and Stanley Reiter},
  organization = {Cambridge University Press},
  year = {2006},
  address = {Cambridge, U.K},
  publisher = {Cambridge University Press},
  abstract = {A mechanism is a mathematical structure that models institutions through
        which economic activity is guided and coordinated. There are many such
        institutions; markets are the most familiar ones. Lawmakers, administrators and
        officers of private companies create institutions in order to achieve desired
        goals. They seek to do so in ways that economize on the resources needed to
        operate the institutions, and that provide incentives that induce the required
        behaviors. This book presents systematic procedures for designing mechanisms that
        achieve specified performance, and economize on the resources required to operate
        the mechanism. The systematic design procedures are algorithms for designing
        informationally efficient mechanisms. Most of the book deals with these
        procedures of design. When there are finitely many environments to be dealt with,
        and there is a Nash-implementing mechanism, our algorithms can be used to make
        that mechanism into an informationally efficient one. Informationally efficient
        dominant strategy implementation is also studied. Leonid Hurwicz is the Nobel
        Prize Winner 2007 for The Sveriges Riksbank Prize in Economic Sciences in Memory
        of Alfred Nobel, along with colleagues Eric Maskin and Roger Myerson, for his
        work on the effectiveness of markets},
  www_section = {algorithms, Complexity, Computational Geometry, Computer Algebra,
        Economics: general interest},
  isbn = {9780521836418},
  doi = {10.1017/CBO9780511754258},
}
2006_8
@book{2006_8,
  title = {Differential Privacy},
  author = {Dwork, Cynthia},
  booktitle = {Automata, Languages and Programming},
  organization = {Springer Berlin Heidelberg},
  volume = {4052},
  year = {2006},
  pages = {1--12},
  editor = {Bugliesi, Michele and Preneel, Bart and Sassone, Vladimiro and Wegener, Ingo},
  publisher = {Springer Berlin Heidelberg},
  series = {Lecture Notes in Computer Science},
  abstract = {In 1977 Dalenius articulated a desideratum for statistical databases: nothing
        about an individual should be learnable from the database that cannot be learned
        without access to the database. We give a general impossibility result showing
        that a formalization of Dalenius' goal along the lines of semantic security
        cannot be achieved. Contrary to intuition, a variant of the result threatens the
        privacy even of someone not in the database. This state of affairs suggests a new
        measure, differential privacy, which, intuitively, captures the increased risk to
        one's privacy incurred by participating in a database. The techniques developed in
        a sequence of papers [8, 13, 3], culminating in those described in [12], can
        achieve any desired level of privacy under this measure. In many cases, extremely
        accurate information about the database can be provided while simultaneously
        ensuring very high levels of privacy},
  isbn = {978-3-540-35907-4},
  doi = {10.1007/11787006_1},
  url = {http://dx.doi.org/10.1007/11787006_1},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2006Dwork_0.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2006_9
@phdthesis{2006_9,
  title = {Distributed k-ary System: Algorithms for Distributed Hash Tables},
  author = {Ali Ghodsi},
  school = {KTH/Royal Institute of Technology},
  year = {2006},
  month = dec,
  address = {Stockholm},
  pages = {0--209},
  type = {Doctoral},
  abstract = {This dissertation presents algorithms for data structures called distributed
        hash tables (DHT) or structured overlay networks, which are used to build
        scalable self-managing distributed systems. The provided algorithms guarantee
        lookup consistency in the presence of dynamism: they guarantee consistent lookup
        results in the presence of nodes joining and leaving. Similarly, the algorithms
        guarantee that routing never fails while nodes join and leave. Previous
        algorithms for lookup consistency either suffer from starvation, do not work in
        the presence of failures, or lack proof of correctness. Several group
        communication algorithms for structured overlay networks are presented. We
        provide an overlay broadcast algorithm, which unlike previous algorithms avoids
        redundant messages, reaching all nodes in O(log n) time, while using O(n)
        messages, where n is the number of nodes in the system. The broadcast algorithm
        is used to build overlay multicast. We introduce bulk operation, which enables a
        node to efficiently make multiple lookups or send a message to all nodes in a
        specified set of identifiers. The algorithm ensures that all specified nodes are
        reached in O(log n) time, sending maximum O(log n) messages per node, regardless
        of the input size of the bulk operation. Moreover, the algorithm avoids sending
        redundant messages. Previous approaches required multiple lookups, which consume
        more messages and can render the initiator a bottleneck. Our algorithms are used
        in DHT-based storage systems, where nodes can do thousands of lookups to fetch
        large files. We use the bulk operation algorithm to construct a pseudo-reliable
        broadcast algorithm. Bulk operations can also be used to implement efficient
        range queries. Finally, we describe a novel way to place replicas in a DHT,
        called symmetric replication, that enables parallel recursive lookups. Parallel
        lookups are known to reduce latencies. However, costly iterative lookups have
        previously been used to do parallel lookups. Moreover, joins or leaves only
        require exchanging O(1) messages, while other schemes require at least log(f)
        messages for a replication degree of f. The algorithms have been implemented in a
        middleware called the Distributed k-ary System (DKS), which is briefly
        described},
  www_section = {distributed hash table, distributed k-ary system, DKS},
  url = {http://eprints.sics.se/516/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Ghodsi\%20-\%20Distributed\%20k-ary\%20System.pdf},
}
2007_0
@techreport{2007_0,
  title = {Analyzing Peer Behavior in KAD}, 
  author = {Steiner, Moritz and En-Najjary, Taoufik and Biersack, Ernst W.}, 
  institution = {Institut Eurecom}, 
  number = {RR-07-205}, 
  year = {2007}, 
  month = oct, 
  address = {Sophia Antipolis}, 
  type = {Tech report}, 
  abstract = {Distributed hash tables (DHTs) have been actively studied in literature and
        many different proposals have been made on how to organize peers in a DHT.
        However, very few DHTs have been implemented in real systems and deployed on a
        large scale. One exception is KAD, a DHT based on Kademlia, which is part of
        eDonkey2000, a peer-to-peer file sharing system with several million simultaneous
        users. We have been crawling KAD continuously for about six months and obtained
        information about geographical distribution of peers, session times, peer
        availability, and peer lifetime. We also evaluated to what extent information
        about past peer uptime can be used to predict the remaining uptime of the peer.
        Peers are identified by the so called KAD ID, which was up to now assumed to
        remain the same across sessions. However, we observed that this is not the case:
        There is a large number of peers, in particular in China, that change their KAD
        ID, sometimes as frequently as after each session. This change of KAD IDs makes
        it difficult to characterize end-user availability or membership turnover. By
        tracking end-users with static IP addresses, we could measure the rate of change
        of KAD ID per end-user}, 
  www_section = {distributed hash table, KAD, peer behavior}, 
  url = {http://www.eurecom.fr/~btroup/kadtraces/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Analyzing\%20peer\%20behavior\%20in\%20KAD.pdf},
}
2007_1
@book{2007_1,
  title = {B.A.T.M.A.N Status Report}, 
  author = {Axel Neumann and Corinna Elektra Aichele and Marek Lindner}, 
  year = {2007}, 
  publisher = {unknown}, 
  abstract = {This report documents the current status of the development and
        implementation of the B.A.T.M.A.N (better approach to mobile ad-hoc networking)
        routing protocol. B.A.T.M.A.N uses a simple and robust algorithm for establishing
        multi-hop routes in mobile ad-hoc networks. It ensures highly adaptive and
        loop-free routing while causing only low processing and traffic cost}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/batman-status.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2007_10
@incollection{2007_10,
  title = {Practical and Secure Solutions for Integer Comparison}, 
  author = {Garay, Juan and Schoenmakers, Berry and Villegas, Jos{\'e}}, 
  booktitle = {Public Key Cryptography -- PKC 2007}, 
  volume = {4450}, 
  year = {2007}, 
  pages = {330--342}, 
  editor = {Okamoto, Tatsuaki and Wang, Xiaoyun}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Yao's classical millionaires' problem is about securely determining whether x
        > y, given two input values x,y, which are held as private inputs by two parties,
        respectively. The output x > y becomes known to both parties. In this paper, we
        consider a variant of Yao's problem in which the inputs x,y as well as the output
        bit x > y are encrypted. Referring to the framework of secure n-party computation
        based on threshold homomorphic cryptosystems as put forth by Cramer, Damg{\r
        a}rd, and Nielsen at Eurocrypt 2001, we develop solutions for integer comparison,
        which take as input two lists of encrypted bits representing x and y,
        respectively, and produce an encrypted bit indicating whether x > y as output.
        Secure integer comparison is an important building block for applications such as
        secure auctions. In this paper, our focus is on the two-party case, although most
        of our results extend to the multi-party case. We propose new logarithmic-round
        and constant-round protocols for this setting, which achieve simultaneously very
        low communication and computational complexities. We analyze the protocols in
        detail and show that our solutions compare favorably to other known solutions}, 
  www_section = {homomorphic encryption, Millionaires' problem, secure multi-party
        computation}, 
  isbn = {978-3-540-71676-1}, 
  doi = {10.1007/978-3-540-71677-8_22}, 
  url = {http://dx.doi.org/10.1007/978-3-540-71677-8_22}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IntegerComparisonSolution2007Garay.pdf},
}
2007_11
@mastersthesis{2007_11,
  title = {Secure asynchronous change notifications for a distributed file system}, 
  author = {Bernhard Amann}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2007}, 
  month = nov, 
  address = {Munich, Germany}, 
  pages = {0--74}, 
  abstract = {Distributed file systems have been a topic of interest for a long time and
        there are many file systems that are distributed in one way or another. However
        most distributed file systems are only reasonably usable within a local network
        of computers and some main tasks are still delegated to a very small number of
        servers. Today with the advent of Peer-to-Peer technology, distributed file
        systems that work on top of Peer-to-Peer systems can be built. These systems can
        be built with no or much less centralised components and are usable on a global
        scale. The System Architecture Group at the University of Karlsruhe in Germany
        has developed such a file system, which is built on top of a structured overlay
        network and uses Distributed Hash Tables to store and access the information. One
        problem with this approach is, that each file system can only be accessed with
        the help of an identifier, which changes whenever a file system is modified. All
        clients have to be notified of the new identifier in a secure, fast and reliable
        way. Usually the strategy to solve this type of problem is an encrypted
        multicast. This thesis presents and analyses several strategies of using
        multicast distributions to solve this problem and then unveils our final solution
        based on the Subset Difference method proposed by Naor et al}, 
  www_section = {distributed file system, distributed hash table, peer-to-peer networking,
        store information}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amann\%20-\%20Secure\%20asynchronous\%20change\%20notifications.pdf},
}
2007_12
@inproceedings{2007_12,
  title = {SpoVNet: An Architecture for Supporting Future Internet Applications}, 
  author = {Sebastian Mies}, 
  booktitle = {Proc. 7th W{\"u}rzburg Workshop on IP: Joint EuroFGI and ITG Workshop on
        Visions of Future Generation Networks}, 
  year = {2007}, 
  address = {W{\"u}rzburg, Germany}, 
  abstract = {This talk presents an approach for providing Spontaneous Virtual Networks
        (SpoVNets) that enable flexible, adaptive, and spontaneous provisioning of
        application-oriented and network-oriented services on top of heterogeneous
        networks. SpoVNets supply new and uniform communication abstractions for future
        Internet applications so applications can make use of advanced services not
        supported by today's Internet. We expect that many functions, which are currently
        provided by SpoVNet on the application layer will become an integral part of
        future networks. Thus, SpoVNet will transparently use advanced services from the
        underlying network infrastructure as they become available (e.g., QoS-support in
        access networks or multicast in certain ISPs), enabling a seamless transition
        from current to future generation networks without modifying the applications}, 
  url = {http://www.tm.uka.de/itm/publications.php?bib=257}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SpoVNet.pdf,
        https://git.gnunet.org/bibliography.git/plain/docs/Mies\%20-\%20SpoVNet.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2007_13
@inproceedings{2007_13,
  title = {An Unconditionally Secure Protocol for Multi-Party Set Intersection}, 
  author = {Li, Ronghua and Wu, Chuankun}, 
  booktitle = {Proceedings of the 5th International Conference on Applied Cryptography and
        Network Security}, 
  year = {2007}, 
  address = {Berlin, Heidelberg}, 
  publisher = {Springer-Verlag}, 
  abstract = {Existing protocols for private set intersection are based on homomorphic
        public-key encryption and the technique of representing sets as polynomials in
        the cryptographic model. Based on the ideas of these protocols and the
        two-dimensional verifiable secret sharing scheme, we propose a protocol for
        private set intersection in the information-theoretic model. By representing the
        sets as polynomials, the set intersection problem is converted into the task of
        computing the common roots of the polynomials. By sharing the coefficients of the
        polynomials among parties, the common roots can be computed out using the shares.
        As long as more than 2n/3 parties are semi-honest, our protocol
        correctly computes the intersection of n sets, and reveals no other
        information than what is implied by the intersection and the secret sets
        controlled by the active adversary. This is the first specific protocol for
        private set intersection in the information-theoretic model as far as we know}, 
  www_section = {privacy-preserving set intersection, secure multi-party computation,
        unconditional security}, 
  isbn = {978-3-540-72737-8}, 
  doi = {10.1007/978-3-540-72738-5_15}, 
  url = {http://dx.doi.org/10.1007/978-3-540-72738-5_15}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UnconditionallySecureProtocol2007Li.pdf},
}
2007_14
@article{2007_14,
  title = {Vielleicht anonym? Die Enttarnung von StealthNet-Nutzern}, 
  author = {Nils Durner and Nathan S Evans and Christian Grothoff}, 
  journal = {c't magazin f{\"u}r computer technik}, 
  volume = {21}, 
  year = {2007}, 
  pages = {218}, 
  type = {Report}, 
  www_section = {anonymity, file-sharing, Rshare, Stealthnet}, 
  url = {http://www.heise.de/kiosk/archiv/ct/2007/21/218_Die-Enttarnung-von-StealthNet-Nutzern},
}
2007_2
@phdthesis{2007_2,
  title = {Cooperative Data Backup for Mobile Devices}, 
  author = {Ludovic Court{\`e}s}, 
  school = {Institut National Polytechnique de Toulouse}, 
  year = {2007}, 
  month = mar, 
  abstract = {Mobile devices such as laptops, PDAs and cell phones are increasingly relied
        on but are used in contexts that put them at risk of physical damage, loss or
        theft. However, few mechanisms are available to reduce the risk of losing the
        data stored on these devices. In this dissertation, we try to address this
        concern by designing a cooperative backup service for mobile devices. The service
        leverages encounters and spontaneous interactions among participating devices,
        such that each device stores data on behalf of other devices. We first provide an
        analytical evaluation of the dependability gains of the proposed service.
        Distributed storage mechanisms are explored and evaluated. Security concerns
        arising from the cooperation among mutually suspicious principals are identified,
        and core mechanisms are proposed to allow them to be addressed. Finally, we
        present our prototype implementation of the cooperative backup service}, 
  www_section = {backup, dependability, P2P, ubiquitous computing}, 
  url = {http://ethesis.inp-toulouse.fr/archive/00000544/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/phd-thesis.fr_en.pdf}, 
}
2007_3
@article{2007_3,
  title = {Gossip-based Peer Sampling}, 
  author = {Jelasity, M{\'a}rk and Voulgaris, Spyros and Guerraoui, Rachid and Kermarrec,
        Anne-Marie and van Steen, Maarten}, 
  journal = {ACM Transactions on Computer Systems}, 
  volume = {25}, 
  year = {2007}, 
  abstract = {Gossip-based communication protocols are appealing in large-scale distributed
        applications such as information dissemination, aggregation, and overlay topology
        management. This paper factors out a fundamental mechanism at the heart of all
        these protocols: the peer-sampling service. In short, this service provides every
        node with peers to gossip with. We promote this service to the level of a
        first-class abstraction of a large-scale distributed system, similar to a name
        service being a first-class abstraction of a local-area system. We present a
        generic framework to implement a peer-sampling service in a decentralized manner
        by constructing and maintaining dynamic unstructured overlays through gossiping
        membership information itself. Our framework generalizes existing approaches and
        makes it easy to discover new ones. We use this framework to empirically explore
        and compare several implementations of the peer sampling service. Through
        extensive simulation experiments we show that---although all protocols provide a
        good quality uniform random stream of peers to each node locally---traditional
        theoretical assumptions about the randomness of the unstructured overlays as a
        whole do not hold in any of the instances. We also show that different design
        decisions result in severe differences from the point of view of two crucial
        aspects: load balancing and fault tolerance. Our simulations are validated by
        means of a wide-area implementation}, 
  www_section = {epidemic protocols, Gossip-based protocols, peer sampling service}, 
  issn = {0734-2071}, 
  doi = {10.1145/1275517.1275520}, 
  url = {http://doi.acm.org/10.1145/1275517.1275520}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GossipPeerSampling2007Jelasity.pdf},
}
2007_4
@article{2007_4,
  title = {Gossiping in Distributed Systems}, 
  author = {Kermarrec, Anne-Marie and van Steen, Maarten}, 
  journal = {ACM SIGOPS Operating Systems Review}, 
  volume = {41}, 
  year = {2007}, 
  pages = {2--7}, 
  abstract = {Gossip-based algorithms were first introduced for reliably disseminating data
        in large-scale distributed systems. However, their simplicity, robustness, and
        flexibility make them attractive for more than just pure data dissemination
        alone. In particular, gossiping has been applied to data aggregation, overlay
        maintenance, and resource allocation. Gossiping applications more or less fit the
        same framework, with often subtle differences in algorithmic details determining
        divergent emergent behavior. This divergence is often difficult to understand, as
        formal models have yet to be developed that can capture the full design space of
        gossiping solutions. In this paper, we present a brief introduction to the field
        of gossiping in distributed systems, by providing a simple framework and using
        that framework to describe solutions for various application domains}, 
  issn = {0163-5980}, 
  doi = {10.1145/1317379.1317381}, 
  url = {http://doi.acm.org/10.1145/1317379.1317381}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Gossiping2007Kermarrrec.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2007_5
@book{2007_5,
  title = {The Iterated Prisoner's Dilemma: 20 Years On}, 
  author = {Graham Kendall and Xin Yao and Siang Yew Ching}, 
  volume = {4}, 
  year = {2007}, 
  address = {Singapore}, 
  pages = {0--262}, 
  publisher = {World Scientific Publishing Co. Pte. Ltd}, 
  series = {Advances in Natural Computation}, 
  abstract = {In 1984, Robert Axelrod published a book, relating the story of two
        competitions which he ran, where invited academics entered strategies for "The
        Iterated Prisoners' Dilemma". The book, almost 20 years on, is still widely read
        and cited by academics and the general public. As a celebration of that landmark
        work, we have recreated those competitions to celebrate its 20th anniversary, by
        again inviting academics to submit prisoners' dilemma strategies. The first of
        these new competitions was run in July 2004, and the second in April 2005.
        "Iterated Prisoners' Dilemma: 20 Years On essentially" provides an update of the
        Axelrod's book. Specifically, it presents the prisoners' dilemma, its history and
        variants; highlights original Axelrod's work and its impact; discusses results of
        new competitions; and, showcases selected papers that reflect the latest
        researches in the area}, 
  www_section = {dilemma, iterated prisoners, landmark work}, 
  isbn = {978-981-270-697-3}, 
}
2007_6
@article{2007_6,
  title = {Mapping an Arbitrary Message to an Elliptic Curve when Defined over {$GF(2^n)$}}, 
  author = {Brian King}, 
  journal = {International Journal of Network Security}, 
  volume = {8}, 
  year = {2007}, 
  month = mar, 
  pages = {169--176}, 
  abstract = {The use of elliptic curve cryptography (ECC) when used as a public-key
        cryptosystem for encryption is such that if one has a message to encrypt, then
        they attempt to map it to some point in the prime subgroup of the elliptic curve
        by systematically modifying the message in a deterministic manner. The
        applications typically used for ECC are the key-exchange, digital signature or a
        hybrid encryption systems (ECIES) all of which avoid this problem. In this paper
        we provide a deterministic method that guarantees that the map of a message to an
        elliptic curve point can be made without any modification. This paper provides a
        solution to the open problem posed in [7] concerning the creation of a
        deterministic method to map arbitrary message to an elliptic curve}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ijns-2009-v8-n2-p169-176.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2007_7
@incollection{2007_7,
  title = {Multiparty Computation for Interval, Equality, and Comparison Without
        Bit-Decomposition Protocol}, 
  author = {Nishide, Takashi and Ohta, Kazuo}, 
  booktitle = {Public Key Cryptography -- PKC 2007}, 
  volume = {4450}, 
  year = {2007}, 
  pages = {343--360}, 
  editor = {Okamoto, Tatsuaki and Wang, Xiaoyun}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Damg{\r a}rd et al. [11] showed a novel technique to convert a polynomial
        sharing of secret a into the sharings of the bits of a in constant rounds, which
        is called the bit-decomposition protocol. The bit-decomposition protocol is a
        very powerful tool because it enables bit-oriented operations even if shared
        secrets are given as elements in the field. However, the bit-decomposition
        protocol is relatively expensive. In this paper, we present a simplified
        bit-decomposition protocol by analyzing the original protocol. Moreover, we
        construct more efficient protocols for a comparison, interval test and equality
        test of shared secrets without relying on the bit-decomposition protocol though
        it seems essential to such bit-oriented operations. The key idea is that we do
        computation on secret a with c and r where c = a + r, c is a revealed value, and
        r is a random bitwise-shared secret. The outputs of these protocols are also
        shared without being revealed. The realized protocols as well as the original
        protocol are constant-round and run with less communication rounds and less data
        communication than those of [11]. For example, the round complexities are reduced
        by a factor of approximately 3 to 10}, 
  www_section = {Bitwise Sharing, Multiparty Computation, secret sharing}, 
  isbn = {978-3-540-71676-1}, 
  doi = {10.1007/978-3-540-71677-8_23}, 
  url = {http://dx.doi.org/10.1007/978-3-540-71677-8_23}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiPartyComputation2007Nishide.pdf},
}
2007_8
@inproceedings{2007_8,
  title = {A New Efficient Privacy-preserving Scalar Product Protocol}, 
  author = {Amirbekyan, Artak and Estivill-Castro, Vladimir}, 
  booktitle = {Proceedings of the Sixth Australasian Conference on Data Mining and
        Analytics--Volume 70}, 
  year = {2007}, 
  address = {Darlinghurst, Australia}, 
  publisher = {Australian Computer Society, Inc}, 
  abstract = {Recently, privacy issues have become important in data analysis, especially
        when data is horizontally partitioned over several parties. In data mining, the
        data is typically represented as attribute-vectors and, for many applications,
        the scalar (dot) product is one of the fundamental operations that is repeatedly
        used. In privacy-preserving data mining, data is distributed across several
        parties. The efficiency of secure scalar products is important, not only because
        they can cause overhead in communication cost, but dot product operations also
        serve as one of the basic building blocks for many other secure protocols.
        Although several solutions exist in the relevant literature for this problem, the
        need for more efficient and more practical solutions still remains. In this
        paper, we present a very efficient and very practical secure scalar product
        protocol. We compare it to the most common scalar product protocols. We not only
        show that our protocol is much more efficient than the existing ones, we also
        provide experimental results by using a real life dataset}, 
  www_section = {privacy preserving data mining}, 
  isbn = {978-1-920682-51-4}, 
  url = {http://dl.acm.org/citation.cfm?id=1378245.1378274}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyPreserving2007Ambirbekyan.pdf},
}
2007_9
@inproceedings{2007_9,
  title = {ParaNets: A Parallel Network Architecture for Challenged Networks}, 
  author = {Khaled A. Harras and Mike P. Wittie and Kevin C. Almeroth and Elizabeth M.
        Belding}, 
  booktitle = {Proceedings of the 8th IEEE Workshop on Mobile Computing Systems and
        Applications (HotMobile 2007)}, 
  year = {2007}, 
  month = mar, 
  abstract = {Networks characterized by challenges, such as intermittent connectivity,
        network heterogeneity, and large delays, are called "challenged networks". We
        propose a novel network architecture for challenged networks dubbed Parallel
        Networks, or, ParaNets. The vision behind ParaNets is to have challenged network
        protocols operate over multiple heterogenous networks, simultaneously available,
        through one or more devices. We present the ParaNets architecture and discuss its
        short-term challenges and longterm implications. We also argue, based on current
        research trends and the ParaNets architecture, for the evolution of the
        conventional protocol stack to a more flexible cross-layered protocol tree. To
        demonstrate the potential impact of ParaNets, we use Delay Tolerant Mobile
        Networks (DTMNs) as a representative challenged network over which we evaluate
        ParaNets. Our ultimate goal in this paper is to open the way for further work in
        challenged networks using ParaNets as the underlying architecture}, 
  isbn = {978-0-7695-3001-7}, 
  url = {http://ieeexplore.ieee.org/Xplore/login.jsp?reload=true\&url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4389542\%2F4389543\%2F04389561.pdf\%3Farnumber\%3D4389561\&authDecision=-203},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hotmobile07.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_0
@inproceedings{2008_0,
  title = {AmbiComp: A platform for distributed execution of Java programs on embedded
        systems by offering a single system image}, 
  author = {Johannes Eickhold and Thomas Fuhrmann and Bjoern Saballus and Sven Schlender
        and Thomas Suchy}, 
  booktitle = {AmI-Blocks'08, European Conference on Ambient Intelligence 2008}, 
  year = {2008}, 
  month = jan, 
  abstract = {Ambient Intelligence pursues the vision that small networked computers will
        jointly perform tasks that create the illusion of an intelligent environment. One
        of the most pressing challenges in this context is the question how one could
        easily develop software for such highly complex, but resource-scarce systems. In
        this paper we present a snapshot of our ongoing work towards facilitating software
        development for Ambient Intelligence systems. In particular, we present the
        AmbiComp [1] platform. It consists of small, modular hardware, a flexible firmware
        including a Java Virtual Machine, and an Eclipse-based integrated development
        environment}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/publ_2008_eickhold-fuhrmann-saballus-ua_ambicomp.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_1
@inproceedings{2008_1,
  title = {Analyzing Unreal Tournament 2004 Network Traffic Characteristics}, 
  author = {H{\"u}bsch, Christian}, 
  booktitle = {CGAT'08 Singapore, 28th-30th}, 
  year = {2008}, 
  abstract = {With increasing availability of high-speed access links in the private
        sector, online real-time gaming has become a major and still growing segment in
        terms of market and network impact today. One of the most popular games is Unreal
        Tournament 2004, a fast-paced action game that still ranks within the top 10 of
        the most-played multiplayer Internet-games, according to GameSpy [1]. Besides
        high demands in terms of graphical computation, games like Unreal also impose
        hard requirements regarding network packet delay and jitter, for small
        deterioration in these conditions influences gameplay recognizably. To make
        matters worse, such games generate a very specific network traffic with strong
        requirements in terms of data delivery. In this paper, we analyze the network
        traffic characteristics of Unreal Tournament 2004. The experiments include
        different aspects like variation of map sizes, player count, player behavior as
        well as hardware and game-specific configuration. We show how different operating
        systems influence network behavior of the game. Our work gives a promising
        picture of how the specific real-time game behaves in terms of network impact and
        may be used as a basis e.g. for the development of specialized traffic
        generators}, 
  url = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=295}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_10
@inproceedings{2008_10,
  title = {Robust De-anonymization of Large Sparse Datasets}, 
  author = {Narayanan, Arvind and Shmatikov, Vitaly}, 
  booktitle = {Proceedings of the 2008 IEEE Symposium on Security and Privacy}, 
  year = {2008}, 
  address = {Washington, DC, USA}, 
  publisher = {IEEE Computer Society}, 
  abstract = {We present a new class of statistical deanonymization attacks against
        high-dimensional micro-data, such as individual preferences, recommendations,
        transaction records and so on. Our techniques are robust to perturbation in the
        data and tolerate some mistakes in the adversary's background knowledge. We apply
        our de-anonymization methodology to the Netflix Prize dataset, which contains
        anonymous movie ratings of 500,000 subscribers of Netflix, the world's largest
        online movie rental service. We demonstrate that an adversary who knows only a
        little bit about an individual subscriber can easily identify this subscriber's
        record in the dataset. Using the Internet Movie Database as the source of
        background knowledge, we successfully identified the Netflix records of known
        users, uncovering their apparent political preferences and other potentially
        sensitive information}, 
  www_section = {anonymity, attack, privacy}, 
  isbn = {978-0-7695-3168-7}, 
  doi = {10.1109/SP.2008.33}, 
  url = {http://dx.doi.org/10.1109/SP.2008.33}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Deanonymization2008narayanan.pdf},
}
2008_11
@booklet{2008_11,
  title = {The Spontaneous Virtual Networks Architecture for Supporting Future Internet
        Services and Applications}, 
  author = {Bless, Roland and Waldhorst, Oliver and Mayer, Christoph P.}, 
  year = {2008}, 
  publisher = {NEC, Heidelberg}, 
  note = {Vortrag auf dem Fachgespr{\"a}ch der GI/ITG-Fachgruppe
        {\textquoteleft}{\textquoteleft}Kommunikation und Verteilte Systeme'' Future
        Internet}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_12
@inproceedings{2008_12,
  title = {Towards Empirical Aspects of Secure Scalar Product}, 
  author = {I-Cheng Wang and Chih-Hao Shen and Tsan-sheng Hsu and Churn-Chung Liao and
        Da-Wei Wang and Zhan, J.}, 
  booktitle = {Proceedings of the International Conference on Information Security and
        Assurance (ISA 2008)}, 
  year = {2008}, 
  month = apr, 
  abstract = {Privacy is ultimately important, and there is a fair amount of research about
        it. However, few empirical studies about the cost of privacy are conducted. In
        the area of secure multiparty computation, the scalar product has long been
        reckoned as one of the most promising building blocks in place of the classic
        logic gates. The reason is not only the scalar product complete, which is as good
        as logic gates, but also the scalar product is much more efficient than logic
        gates. As a result, we set to study the computation and communication resources
        needed for some of the most well-known and frequently referred secure
        scalar-product protocols, including the composite-residuosity, the
        invertible-matrix, the polynomial-sharing, and the commodity-based approaches.
        Besides the implementation remarks of these approaches, we analyze and compare
        their execution time, computation time, and random number consumption, which are
        the most concerned resources when talking about secure protocols. Moreover,
        Fairplay the benchmark approach implementing Yao's famous circuit evaluation
        protocol, is included in our experiments in order to demonstrate the potential
        for the scalar product to replace logic gates}, 
  www_section = {circuit evaluation protocol, Circuits, commodity-based, composite
        residuosity, composite-residuosity, Computational efficiency, Costs, data
        privacy, empirical survey, Information science, information security,
        invertible-matrix, logic gates, polynomial-sharing, Polynomials, privacy,
        Proposals, protocols, scalar-product, secure multiparty computation, secure
        protocols, Secure scalar product, secure scalar-product protocols}, 
  doi = {10.1109/ISA.2008.78}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EmpiricalAspects2009Wang.pdf},
}
2008_13
@conference{2008_13,
  title = {The Underlay Abstraction in the Spontaneous Virtual Networks (SpoVNet)
        Architecture}, 
  author = {Roland Bless and H{\"u}bsch, Christian and Sebastian Mies and Oliver
        Waldhorst}, 
  booktitle = {Proc. 4th EuroNGI Conf. on Next Generation Internet Networks (NGI 2008)}, 
  year = {2008}, 
  address = {Krakow, Poland}, 
  pages = {115--122}, 
  abstract = {Next generation networks will combine many heterogeneous access technologies
        to provide services to a large number of highly mobile users while meeting their
        demands for quality of service, robustness, and security. Obviously, this is not
        a trivial task and many protocols fulfilling some combination of these
        requirements have been proposed. However, none of the current proposals meets all
        requirements, and the deployment of new applications and services is hindered by
        a patchwork of protocols. This paper presents Spontaneous Virtual Networks
        (SpoVNet), an architecture that fosters the creation of new applications and
        services for next generation networks by providing an underlay abstraction layer.
        This layer applies an overlay-based approach to cope with mobility, multi-homing,
        and heterogeneity. For coping with network mobility, it uses a SpoVNet-specific
        addressing scheme, splitting node identifiers from network locators and providing
        persistent connections by transparently switching locators. To deal with
        multihoming it transparently chooses the most appropriate pair of network
        locators for each connection. To cope with network and protocol heterogeneity, it
        uses dedicated overlay nodes, e.g., for relaying between IPv4 and IPv6 hosts}, 
  www_section = {heterogeneity, robustness}, 
  url = {http://www.tm.uka.de/itm/WebMan/view.php?view=publikationen_detail\&id=283}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/underlayabs-ngi08-final.pdf},
}
2008_14
@article{2008_14,
  title = {Unerkannt. Anonymisierende Peer-to-Peer-Netze im {\"U}berblick}, 
  author = {Nils Durner and Nathan S Evans and Christian Grothoff}, 
  journal = {iX Magazin f{\"u}r professionelle Informationstechnik}, 
  year = {2008}, 
  type = {Survey}, 
  url = {http://www.heise.de/kiosk/archiv/ix/2008/9/88_Anonyme-Peer-to-Peer-Netze-im-Ueberblick},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_15
@article{2008_15,
  title = {What Can We Learn Privately?}, 
  author = {Shiva Prasad Kasiviswanathan and Homin K. Lee and Kobbi Nissim and Sofya
        Raskhodnikova and Adam Smith}, 
  journal = {CoRR}, 
  volume = {abs/0803.0924}, 
  year = {2008}, 
  abstract = {Learning problems form an important category of computational tasks that
        generalizes many of the computations researchers apply to large real-life data
        sets. We ask: what concept classes can be learned privately, namely, by an
        algorithm whose output does not depend too heavily on any one input or specific
        training example? More precisely, we investigate learning algorithms that satisfy
        differential privacy, a notion that provides strong confidentiality guarantees in
        contexts where aggregate information is released about a database containing
        sensitive information about individuals. We demonstrate that, ignoring
        computational constraints, it is possible to privately agnostically learn any
        concept class using a sample size approximately logarithmic in the cardinality of
        the concept class. Therefore, almost anything learnable is learnable privately:
        specifically, if a concept class is learnable by a (non-private) algorithm with
        polynomial sample complexity and output size, then it can be learned privately
        using a polynomial number of samples. We also present a computationally efficient
        private PAC learner for the class of parity functions. Local (or randomized
        response) algorithms are a practical class of private algorithms that have
        received extensive investigation. We provide a precise characterization of local
        private learning algorithms. We show that a concept class is learnable by a local
        algorithm if and only if it is learnable in the statistical query (SQ) model.
        Finally, we present a separation between the power of interactive and
        noninteractive local learning algorithms}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WhatCanWeLearnPrivately2008Kasiviswanthan.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_2
@conference{2008_2,
  title = {Consistency Management for Peer-to-Peer-based Massively Multiuser Virtual
        Environments}, 
  author = {Gregor Schiele and Richard S{\"u}selbeck and Arno Wacker and Triebel, Tonio and
        Christian Becker}, 
  booktitle = {Proc. 1st Int.Workshop on Massively Multiuser Virtual Environments
        (MMVE'08)}, 
  year = {2008}, 
  url = {http://www.spovnet.de/files/publications/MMVEConsistency.pdf/view}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MMVEConsistency.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_3
@phdthesis{2008_3,
  title = {The Decentralized File System Igor-FS as an Application for Overlay-Networks}, 
  author = {unknown}, 
  school = {Universit{\"a}t Fridericiana (TH)}, 
  volume = {Engineering}, 
  year = {2008}, 
  month = {February}, 
  address = {Karlsruhe, Germany}, 
  pages = {0--193}, 
  type = {Doctoral}, 
  abstract = {Working in distributed systems is part of the information society. More and
        more people and organizations work with growing data volumes. Often, part of the
        problem is to access large files in a shared way. Until now, there are two often
        used approaches to allow this kind of access. Either the files are transferred via
        FTP, e-mail or similar medium before the access happens, or a centralized server
        provides file services. The first alternative has the disadvantage that the
        entire file has to be transferred before the first access can be successful. If
        only small parts in the file have been changed compared to a previous version,
        the entire file has to be transferred anyway. The centralized approach has
        disadvantages regarding scalability and reliability. In both approaches
        authorization and authentication can be difficult in case users are separated by
        untrusted network segments}, 
  url = {http://digbib.ubka.uni-karlsruhe.de/volltexte/1000009668}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kutzner\%20-\%20The\%20descentralized\%20file\%20system\%20Igor-FS\%20as\%20an\%20application_0.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_4
@incollection{2008_4,
  title = {Estimating The Size Of Peer-To-Peer Networks Using Lambert's W Function}, 
  author = {Javier Bustos-Jim{\'e}nez and Nicol{\'a}s Bersano and Satu Elisa Schaeffer and
        Jos{\'e} Miguel Piquer and Alexandru Iosup and Augusto Ciuffoletti}, 
  booktitle = {Grid Computing--Achievements and Prospects}, 
  organization = {Springer-Verlag}, 
  year = {2008}, 
  address = {New York, NY, USA}, 
  pages = {61--72}, 
  publisher = {Springer-Verlag}, 
  abstract = {In this work, we address the problem of locally estimating the size of a
        Peer-to-Peer (P2P) network using local information. We present a novel approach
        for estimating the size of a peer-to-peer (P2P) network, fitting the sum of new
        neighbors discovered at each iteration of a breadth-first search (BFS) with a
        logarithmic function, and then using Lambert's W function to solve a root of a
        ln(n) + b--n = 0, where n is the network size. With rather little computation, we
        reach an estimation error of at most 10 percent, only allowing the BFS to iterate
        to the third level}, 
  www_section = {distributed computing, lambert w function, network size estimation,
        peer-to-peer networking}, 
  isbn = {978-0-387-09456-4}, 
  issn = {978-0-387-09456-4}, 
  url = {http://eprints.adm.unipi.it/649/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Grid\%20Computing\%20-\%20Estimating\%20The\%20Size\%20Of\%20Peer-To-Peer\%20Networks.pdf},
}
2008_5
@article{2008_5,
  title = {On the False-positive Rate of Bloom Filters}, 
  author = {Bose, Prosenjit and Guo, Hua and Kranakis, Evangelos and Maheshwari, Anil and
        Morin, Pat and Morrison, Jason and Smid, Michiel and Tang, Yihui}, 
  journal = {Inf. Process. Lett}, 
  volume = {108}, 
  year = {2008}, 
  pages = {210--213}, 
  abstract = {Bloom filters are a randomized data structure for membership queries dating
        back to 1970. Bloom filters sometimes give erroneous answers to queries, called
        false positives. Bloom analyzed the probability of such erroneous answers, called
        the false-positive rate, and Bloom's analysis has appeared in many publications
        throughout the years. We show that Bloom's analysis is incorrect and give a
        correct analysis}, 
  www_section = {Analysis of algorithms, data structures}, 
  issn = {0020-0190}, 
  doi = {10.1016/j.ipl.2008.05.018}, 
  url = {http://dx.doi.org/10.1016/j.ipl.2008.05.018}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FalsepositiverateBloomFilter2008Bose.pdf},
}
2008_6
@conference{2008_6,
  title = {Higher Confidence in Event Correlation Using Uncertainty Restrictions}, 
  author = {Gerald G. Koch and Boris Koldehofe and Kurt Rothermel}, 
  booktitle = {28th International Conference on In Distributed Computing Systems
        Workshops}, 
  year = {2008}, 
  abstract = {Distributed cooperative systems that use event notification for communication
        can benefit from event correlation within the notification network. In the
        presence of uncertain data, however, correlation results easily become
        unreliable. The handling of uncertainty is therefore an important challenge for
        event correlation in distributed event notification systems. In this paper, we
        present a generic correlation model that is aware of uncertainty. We propose
        uncertainty constraints that event correlation can take into account and show how
        they can lead to higher confidence in the correlation result. We demonstrate that
        the application of this model allows to obtain a qualitative description of event
        correlation}, 
  url = {http://www.citeulike.org/user/nmsx/article/4505416}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koch08confidence.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2008_7
@conference{2008_7,
  title = {Improving User and ISP Experience through ISP-aided P2P Locality}, 
  author = {Vinay Aggarwal and Obi Akonjang and Feldmann, Anja}, 
  booktitle = {GI'08. Proceedings of 11th IEEE Global Internet Symposium 2008}, 
  organization = {IEEE Computer Society}, 
  year = {2008}, 
  month = {April}, 
  address = {Phoenix, AZ}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Despite recent improvements, P2P systems are still plagued by fundamental
        issues such as overlay/underlay topological and routing mismatch, which affects
        their performance and causes traffic strains on the ISPs. In this work, we aim to
        improve overall system performance for ISPs as well as P2P systems by means of
        traffic localization through improved collaboration between ISPs and P2P systems.
        More specifically, we study the effects of different ISP/P2P topologies as well
        as a broad range of influential user behavior characteristics, namely content
        availability, churn, and query patterns, on end-user and ISP experience. We show
        that ISP-aided P2P locality benefits both P2P users and ISPs, measured in terms
        of improved content download times, increased network locality of query responses
        and desired content, and overall reduction in P2P traffic}, 
  www_section = {isp, P2P}, 
  isbn = {978-1-4244-2219-7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/isp-aidedp2p.PDF}, 
}
2008_8
@techreport{2008_8,
  title = {Privacy guarantees through distributed constraint satisfaction}, 
  author = {Boi Faltings and Thomas L{\'e}aut{\'e} and Adrian Petcu}, 
  journal = {unknown}, 
  institution = {Swiss Federal Institute of Technology (EPFL)}, 
  number = {12}, 
  year = {2008}, 
  month = {April}, 
  address = {Lausanne, Switzerland}, 
  type = {Tech report}, 
  abstract = {In Distributed Constraint Satisfaction Problems, agents often
        desire to find a solution while revealing as little as possible about their
        variables and constraints. So far, most algorithms for DisCSP do not guarantee
        privacy of this information. This paper describes some simple obfuscation
        techniques that can be used with DisCSP algorithms such as DPOP, and provide
        sensible privacy guarantees based on the distributed solving process without
        sacrificing its efficiency}, 
  www_section = {algorithms, DisCSP algorithm, distributed constraint satisfaction,
        optimization, privacy, SMC}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20Privacy\%20guarantees\%20through\%20DCS.pdf},
}
2008_9
@article{2008_9,
  title = {Progressive Strategies for Monte-Carlo Tree Search}, 
  author = {Guillaume M. J-B. Chaslot and Mark H. M. Winands and H. Jaap van den Herik and
        Jos W. H. M. Uiterwijk and Bruno Bouzy}, 
  journal = {New Mathematics and Natural Computation}, 
  volume = {4}, 
  year = {2008}, 
  pages = {343--357}, 
  abstract = {Monte-Carlo Tree Search (MCTS) is a new best-first search guided by the
        results of Monte-Carlo simulations. In this article, we introduce two progressive
        strategies for MCTS, called progressive bias and progressive unpruning. They
        enable the use of relatively time-expensive heuristic knowledge without speed
        reduction. Progressive bias directs the search according to heuristic knowledge.
        Progressive unpruning first reduces the branching factor, and then increases it
        gradually again. Experiments assess that the two progressive strategies
        significantly improve the level of our Go program Mango. Moreover, we see that
        the combination of both strategies performs even better on larger board sizes}, 
  www_section = {computer go, MCTS heuristic search, Monte-Carlo Tree Search}, 
  doi = {10.1142/S1793005708001094}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NMNC\%20-\%20Progressive\%20strategies\%20for\%20MCTS.pdf},
}
2009_0
@article{2009_0,
  title = {Brahms: Byzantine Resilient Random Membership Sampling}, 
  author = {Edward Bortnikov and Maxim Gurevich and Idit Keidar and Gabriel Kliot and
        Alexander Shraer}, 
  journal = {Computer Networks Journal (COMNET), Special Issue on Gossiping in Distributed
        Systems}, 
  year = {2009}, 
  month = {April}, 
  www_section = {Byzantine Resilient Sampling, Random Membership, random sampling}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brahms-Comnet-Mar09.pdf
        , https://git.gnunet.org/bibliography.git/plain/docs/Brahms-rps-mar09.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2009_1
@conference{2009_1,
  title = {CLIO/UNISONO: practical distributed and overlay-wide network measurement}, 
  author = {Ralph Holz and Dirk Haage}, 
  booktitle = {CLIO/UNISONO: practical distributed and overlay-wide network measurement}, 
  year = {2009}, 
  abstract = {Building on previous work, we present an early version of our CLIO/UNISONO
        framework for distributed network measurements. CLIO/UNISONO is a generic
        measurement framework specifically aimed at overlays that need measurements for
        optimization purposes. In this talk, we briefly introduce the most important
        concepts and then focus on some more advanced mechanisms like measurements across
        connectivity domains and remote orders}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
2009_10
@conference{2009_10,
  title = {Peer Profiling and Selection in the I2P Anonymous Network}, 
  author = {Lars Schimmer}, 
  booktitle = {PET-CON 2009.1}, 
  year = {2009}, 
  month = {March}, 
  address = {TU Dresden, Germany}, 
  www_section = {I2P}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/I2P-PET-CON-2009.1.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2009_11
@conference{2009_11,
  title = {Privacy Integrated Queries: An Extensible Platform for Privacy-preserving Data
        Analysis}, 
  author = {McSherry, Frank D.}, 
  booktitle = {Proceedings of the 2009 ACM SIGMOD International Conference on Management of
        Data}, 
  organization = {ACM}, 
  year = {2009}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {We report on the design and implementation of the Privacy Integrated Queries
        (PINQ) platform for privacy-preserving data analysis. PINQ provides analysts with
        a programming interface to unscrubbed data through a SQL-like language. At the
        same time, the design of PINQ's analysis language and its careful implementation
        provide formal guarantees of differential privacy for any and all uses of the
        platform. PINQ's unconditional structural guarantees require no trust placed in
        the expertise or diligence of the analysts, substantially broadening the scope
        for design and deployment of privacy-preserving data analysis, especially by
        non-experts}, 
  www_section = {anonymization, confidentiality, Differential Privacy, linq}, 
  isbn = {978-1-60558-551-2}, 
  doi = {10.1145/1559845.1559850}, 
  url = {http://doi.acm.org.eaccess.ub.tum.de/10.1145/1559845.1559850}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivacyIntergratedQueries2009McSherry.pdf},
}
2009_12
@article{2009_12,
  title = {Robust Random Number Generation for Peer-to-Peer Systems}, 
  author = {Awerbuch, Baruch and Scheideler, Christian}, 
  journal = {Theor. Comput. Sci}, 
  volume = {410}, 
  year = {2009}, 
  pages = {453--466}, 
  abstract = {We consider the problem of designing an efficient and robust distributed
        random number generator for peer-to-peer systems that is easy to implement and
        works even if all communication channels are public. A robust random number
        generator is crucial for avoiding adversarial join-leave attacks on peer-to-peer
        overlay networks. We show that our new generator together with a light-weight
        rule recently proposed in [B. Awerbuch, C. Scheideler, Towards a scalable and
        robust DHT, in: Proc. of the 18th ACM Symp. on Parallel Algorithms and
        Architectures, SPAA, 2006. See also http://www14.in.tum.de/personen/scheideler]
        for keeping peers well distributed can keep various structured overlay networks
        in a robust state even under a constant fraction of adversarial peers}, 
  www_section = {Join-leave attacks, Peer-to-peer systems, Random number generation}, 
  issn = {0304-3975}, 
  doi = {10.1016/j.tcs.2008.10.003}, 
  url = {http://dx.doi.org/10.1016/j.tcs.2008.10.003}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OPODIS-116b.pdf}, 
}
2009_13
@conference{2009_13,
  title = {Security and Privacy Challenges in the Internet of Things}, 
  author = {Mayer, Christoph P.}, 
  booktitle = {Proceedings of KiVS Workshop on Global Sensor Networks (GSN09)}, 
  year = {2009}, 
  note = {http://eceasst.cs.tu-berlin.de/index.php/eceasst/article/download/208/205}, 
  abstract = {The future Internet of Things as an intelligent collaboration of miniaturized
        sensors poses new challenges to security and end-user privacy. The ITU has
        identified that the protection of data and privacy of users is one of the key
        challenges in the Internet of Things [Int05]: lack of confidence about privacy
        will result in decreased adoption among users and therefore is one of the driving
        factors in the success of the Internet of Things. This paper gives an overview,
        categorization, and analysis of security and privacy challenges in the Internet
        of Things}, 
  url = {http://doc.tm.uka.de/2009/security-gsn-camera-ready.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gsn09-security-mayer.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2009_14
@incollection{2009_14,
  title = {Self-organized Data Redundancy Management for Peer-to-Peer Storage Systems}, 
  author = {Yaser Houri and Manfred Jobmann and Thomas Fuhrmann}, 
  booktitle = {Self-Organizing Systems}, 
  volume = {5918}, 
  year = {2009}, 
  pages = {65--76}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In peer-to-peer storage systems, peers can freely join and leave the system
        at any time. Ensuring high data availability in such an environment is a
        challenging task. In this paper we analyze the costs of achieving data
        availability in fully decentralized peer-to-peer systems. We mainly address the
        problem of churn and what effect maintaining availability has on network
        bandwidth. We discuss two different redundancy techniques -- replication and
        erasure coding -- and consider their monitoring and repairing costs analytically.
        We calculate the bandwidth costs using basic costs equations and two different
        Markov reward models. One for centralized monitoring system and the other for
        distributed monitoring. We show a comparison of the numerical results
        accordingly. Depending on these results, we determine the best redundancy and
        maintenance strategy that corresponds to peer's failure probability}, 
  www_section = {distributed storage, Markov chain}, 
  issn = {978-3-642-10864-8}, 
  doi = {10.1007/978-3-642-10865-5}, 
  url = {http://www.springerlink.com/content/28660w27373vh408/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext3.pdf}, 
}
2009_15
@conference{2009_15,
  title = {ShadowWalker: Peer-to-peer Anonymous Communication Using Redundant Structured
        Topologies}, 
  author = {Mittal, Prateek and Borisov, Nikita}, 
  booktitle = {Proceedings of the 16th ACM Conference on Computer and Communications
        Security}, 
  organization = {ACM}, 
  year = {2009}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {Peer-to-peer approaches to anonymous communication promise to eliminate the
        scalability concerns and central vulnerability points of current networks such
        as Tor. However, the P2P setting introduces many new opportunities for attack,
        and previous designs do not provide an adequate level of anonymity. We propose
        ShadowWalker: a new low-latency P2P anonymous communication system, based on a
        random walk over a redundant structured topology. We base our design on shadows
        that redundantly check and certify neighbor information; these certifications
        enable nodes to perform random walks over the structured topology while avoiding
        route capture and other attacks. We analytically calculate the anonymity provided
        by ShadowWalker and show that it performs well for moderate levels of
        attackers, and is much better than the state of the art. We also design an
        extension that improves forwarding performance at a slight anonymity cost,
        while at the same time protecting against selective DoS attacks. We show that our
        system has manageable overhead and can handle moderate churn, making it an
        attractive new design for P2P anonymous communication}, 
  www_section = {anonymity, peer-to-peer, random walks}, 
  isbn = {978-1-60558-894-0}, 
  doi = {10.1145/1653662.1653683}, 
  url = {http://doi.acm.org/10.1145/1653662.1653683}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shadowwalker-ccs09.pdf},
}
2009_16
@article{2009_16,
  title = {A Software and Hardware IPTV Architecture for Scalable DVB Distribution}, 
  author = {Georg Acher and Detlef Fliegl and Thomas Fuhrmann}, 
  journal = {International Journal of Digital Multimedia Broadcasting}, 
  volume = {2009}, 
  year = {2009}, 
  editor = {Georg Acher and Detlef Fliegl and Thomas Fuhrmann}, 
  abstract = {Many standards and even more proprietary technologies deal with IP-based
        television (IPTV). But none of them can transparently map popular public
        broadcast services such as DVB or ATSC to IPTV with acceptable effort. In this
        paper we explain why we believe that such a mapping using a light weight
        framework is an important step towards all-IP multimedia. We then present the
        NetCeiver architecture: it is based on well-known standards such as IPv6, and it
        allows zero configuration. The use of multicast streaming makes NetCeiver highly
        scalable. We also describe a low cost FPGA implementation of the proposed
        NetCeiver architecture, which can concurrently stream services from up to six
        full transponders}, 
  www_section = {DVB, IPTV, multicast}, 
  doi = {10.1155/2009/617203}, 
  url = {http://www.hindawi.com/journals/ijdmb/2009/617203.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/617203.pdf}, 
}
2009_17
@phdthesis{2009_17,
  title = {Solving very large distributed constraint satisfaction problems}, 
  author = {Peter Harvey}, 
  school = {University of Wollongong, New South Wales, Australia}, 
  volume = {Doctor of Philosophy}, 
  year = {2009}, 
  month = {December}, 
  address = {Wollongong, New South Wales, Australia}, 
  pages = {0--211}, 
  type = {PhD}, 
  abstract = {This thesis investigates issues with existing approaches to distributed
        constraint satisfaction, and proposes a solution in the form of a new algorithm.
        These issues are most evident when solving large distributed constraint
        satisfaction problems, hence the title of the thesis. We will first survey
        existing algorithms for centralised constraint satisfaction, and describe how
        they have been modified to handle distributed constraint satisfaction. The method
        by which each algorithm achieves completeness will be investigated and analysed
        by application of a new theorem. We will then present a new algorithm,
        Support-Based Distributed Search, developed explicitly for distributed constraint
        satisfaction rather than being derived from centralised algorithms. This
        algorithm is inspired by the inherent structure of human arguments and similar
        mechanisms we observe in real-world negotiations. A number of modifications to
        this new algorithm are considered, and comparisons are made with existing
        algorithms, effectively demonstrating its place within the field. Empirical
        analysis is then conducted, and comparisons are made to state-of-the-art
        algorithms most able to handle large distributed constraint satisfaction
        problems. Finally, it is argued that any future development in distributed
        constraint satisfaction will necessitate changes in the algorithms used to solve
        small {\textquoteleft}embedded' constraint satisfaction problems. The impact on
        embedded constraint satisfaction problems is considered, with a brief
        presentation of an improved algorithm for hypertree decomposition}, 
  www_section = {algorithms, distributed constraint satisfaction}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20P.Harvey.pdf},
}
2009_18
@conference{2009_18,
  title = {SPINE: Adaptive Publish/Subscribe for Wireless Mesh Networks}, 
  author = {Jorge Alfonso Briones-Garc{\'\i}a and Boris Koldehofe and Kurt Rothermel}, 
  booktitle = {Proc of the 8th IEEE International Conference on Innovative Internet
        Community Systems (I2CS 2008)}, 
  year = {2009}, 
  abstract = {Application deployment on Wireless Mesh Networks (WMNs) is a challenging
        issue. First it requires communication abstractions that allow for interoperation
        with Internet applications and second the offered solution should be sensitive to
        the available resources in the underlying network. Loosely coupled communication
        abstractions, like publish/subscribe, promote interoperability, but unfortunately
        are typically implemented at the application layer without considering the
        available resources at the underlay imposing a significant degradation of
        application performance in the setting of Wireless Mesh Networks. In this paper
        we present SPINE, a content-based publish/subscribe system, which considers the
        particular challenges of deploying application-level services in Wireless Mesh
        Networks. SPINE is designed to reduce the overhead which stems from both
        publications and reconfigurations, to cope with the inherent capacity limitations
        on communication links as well as with mobility of the wireless mesh-clients. We
        demonstrate the effectiveness of SPINE by comparison with traditional approaches
        in implementing content-based publish/subscribe}, 
  www_section = {mesh networks, publish/subscribe}, 
  url = {http://studia.complexica.net/index.php?option=com_content\&view=article\&id=116\%3Aspine--adaptive-publishsubscribe-for-wireless-mesh-networks-pp-320-353\&catid=47\%3Anumber-3\&Itemid=89\&lang=fr},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RI070302.pdf}, 
}
2009_19
@booklet{2009_19,
  title = {SpoVNet Security Task Force Report}, 
  author = {Ralph Holz and Mayer, Christoph P. and Sebastian Mies and Heiko Niedermayer and
        Tariq, Muhammad Adnan}, 
  volume = {ISSN 1613-849X}, 
  number = {TM-2009-3}, 
  year = {2009}, 
  publisher = {Institute of Telematics, Universit{\"a}t Karlsruhe (TH)}, 
  type = {Telematics Technical Report}, 
  url = {http://doc.tm.uka.de/2009/TM-2009-3.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TM-2009-3.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2009_2
@conference{2009_2,
  title = {A Collusion-Resistant Distributed Scalar Product Protocol with Application to
        Privacy-Preserving Computation of Trust}, 
  author = {Melchor, C. A. and Ait-Salem, B. and Gaborit, P.}, 
  booktitle = {Network Computing and Applications, 2009. NCA 2009. Eighth IEEE
        International Symposium on}, 
  year = {2009}, 
  month = {July}, 
  abstract = {Private scalar product protocols have proved to be interesting in various
        applications such as data mining, data integration, trust computing, etc. In
        2007, Yao et al. proposed a distributed scalar product protocol with application
        to privacy-preserving computation of trust [1]. This protocol is split in two
        phases: a homomorphic encryption computation; and a private multi-party summation
        protocol. The summation protocol has two drawbacks: first, it generates a
        non-negligible communication overhead; and second, it introduces a security flaw.
        The contribution of this present paper is two-fold. We first prove that the
        protocol of [1] is not secure in the semi-honest model by showing that it is not
        resistant to collusion attacks and we give an example of a collusion attack, with
        only four participants. Second, we propose to use a superposed sending round as
        an alternative to the multi-party summation protocol, which results in better
        security properties and in a reduction of the communication costs. In particular,
        regarding security, we show that the previous scheme was vulnerable to collusions
        of three users whereas in our proposal we can set t $\in$ [1..n--1] and define a
        protocol resisting to collusions of up to t users}, 
  www_section = {collaboration, collusion-resistant distributed protocol, Computer
        applications, computer networks, cryptographic protocols, cryptography, data
        privacy, distributed computing, homomorphic encryption computation, Laboratories,
        Portable media players, privacy-preserving computation, Privacy-preserving
        computation of trust, private multiparty summation protocol, scalar product
        protocol, secure multi-party computation, Secure scalar product, security,
        Superposed sending., Telephony, trust computation}, 
  doi = {10.1109/NCA.2009.48}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CollusionResistant2009Melchor.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2009_20
@conference{2009_20,
  title       = {Towards End-to-End Connectivity for Overlays across Heterogeneous Networks},
  author      = {Sebastian Mies and Oliver Waldhorst and Hans Wippel},
  booktitle   = {Proc. Int. Workshop on the Network of the Future (Future-Net 2009),
        co-located with IEEE Int. Conf. on Communications (ICC 2009)},
  year        = {2009},
  address     = {Dresden, Germany},
  abstract    = {The incremental adoption of IPv6, middle boxes (e.g., NATs, Firewalls) as
        well as completely new network types and protocols paint a picture of a future
        Internet that consists of extremely heterogeneous edge networks (e.g. IPv4, IPv6,
        industrial Ethernet, sensor networks) that are not supposed or able to
        communicate directly. This increasing heterogeneity imposes severe challenges for
        overlay networks, which are considered as a potential migration strategy towards
        the future Internet since they can add new functionality and services in a
        distributed and self-organizing manner. Unfortunately, overlays are based on
        end-to-end connectivity and, thus, their deployment is hindered by network
        heterogeneity. In this paper, we take steps towards a solution to enable overlay
        connections in such heterogeneous networks, building upon a model of
        heterogeneous networks that comprises several connectivity domains with direct
        connectivity, interconnected by relays. As major contribution, we present a
        distributed protocol that detects the boundaries of connectivity domains as well
        as relays using a gossiping approach. Furthermore, the protocol manages unique
        identifiers of connectivity domains and efficiently handles domain splitting and
        merging due to underlay changes. Simulation studies indicate that the algorithm
        can handle splitting and merging of connectivity domains in reasonable time and
        is scalable with respect to control overhead},
  isbn        = {978-1-4244-3437-4},
  doi         = {10.1109/ICCW.2009.5207975},
  url         = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=5207975},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2009_21
@conference{2009_21,
  title        = {Traffic Engineering vs. Content Distribution: A Game Theoretic Perspective},
  author       = {Dominic DiPalantino and Ramesh Johari},
  booktitle    = {INFOCOM 2009. The 28th IEEE International Conference on Computer
        Communications},
  organization = {IEEE Computer Society},
  year         = {2009},
  month        = {April},
  address      = {Rio de Janeiro},
  pages        = {540--548},
  publisher    = {IEEE Computer Society},
  abstract     = {In this paper we explore the interaction between content distribution and
        traffic engineering. Because a traffic engineer may be unaware of the structure
        of content distribution systems or overlay networks, this management of the
        network does not fully anticipate how traffic might change as a result of his
        actions. Content distribution systems that assign servers at the application
        level can respond very rapidly to changes in the routing of the network.
        Consequently, the traffic engineer's decisions may almost never be applied to the
        intended traffic. We use a game-theoretic framework in which infinitesimal users
        of a network select the source of content, and the traffic engineer decides how
        the traffic will route through the network. We formulate a game and prove the
        existence of equilibria. Additionally, we present a setting in which equilibria
        are socially optimal, essentially unique, and stable. Conditions under which
        efficiency loss may be bounded are presented, and the results are extended to the
        cases of general overlay networks and multiple autonomous systems},
  www_section  = {content distribution, traffic engineering},
  isbn         = {978-1-4244-3512-8},
  doi          = {10.1109/INFCOM.2009.5061960},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Traffic\%20Engineering\%20vs.\%20Content\%20Distribution.PDF},
}
2009_22
@book{2009_22,
  title = {Tuning Vivaldi: Achieving Increased Accuracy and Stability}, 
  author = {Benedikt Elser and Andreas F{\"o}rschler and Thomas Fuhrmann}, 
  booktitle = {Self-Organizing Systems}, 
  volume = {5918}, 
  year = {2009}, 
  pages = {174--184}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Network Coordinates are a basic building block for most peer-to-peer
        applications nowadays. They optimize the peer selection process by allowing the
        nodes to preferably attach to peers to whom they then experience a low round trip
        time. Albeit there has been substantial research effort in this topic over the
        last years, the optimization of the various network coordinate algorithms has not
        been pursued systematically yet. Analyzing the well-known Vivaldi algorithm and
        its proposed optimizations with several sets of extensive Internet traffic
        traces, we found that in face of current Internet data most of the parameters
        that have been recommended in the original papers are a magnitude too high. Based
        on this insight, we recommend modified parameters that improve the algorithms'
        performance significantly}, 
  isbn = {978-3-642-10864-8}, 
  issn = {0302-9743}, 
  doi = {10.1007/978-3-642-10865-5}, 
  url = {http://www.springerlink.com/content/h7r3q58251x72155/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fulltext.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
2009_3
@conference{2009_3,
  title        = {Differentially Private Recommender Systems: Building Privacy into the Netflix
        Prize Contenders},
  author       = {McSherry, Frank and Mironov, Ilya},
  booktitle    = {Proceedings of the 15th ACM SIGKDD International Conference on Knowledge
        Discovery and Data Mining},
  organization = {ACM},
  year         = {2009},
  address      = {New York, NY, USA},
  publisher    = {ACM},
  abstract     = {We consider the problem of producing recommendations from collective user
        behavior while simultaneously providing guarantees of privacy for these users.
        Specifically, we consider the Netflix Prize data set, and its leading algorithms,
        adapted to the framework of differential privacy. Unlike prior privacy work
        concerned with cryptographically securing the computation of recommendations,
        differential privacy constrains a computation in a way that precludes any
        inference about the underlying records from its output. Such algorithms
        necessarily introduce uncertainty--i.e., noise--to computations, trading accuracy
        for privacy. We find that several of the leading approaches in the Netflix Prize
        competition can be adapted to provide differential privacy, without significantly
        degrading their accuracy. To adapt these algorithms, we explicitly factor them
        into two parts, an aggregation/learning phase that can be performed with
        differential privacy guarantees, and an individual recommendation phase that uses
        the learned correlations and an individual's data to provide personalized
        recommendations. The adaptations are non-trivial, and involve both careful
        analysis of the per-record sensitivity of the algorithms to calibrate noise, as
        well as new post-processing steps to mitigate the impact of this noise. We
        measure the empirical trade-off between accuracy and privacy in these
        adaptations, and find that we can provide non-trivial formal privacy guarantees
        while still outperforming the Cinematch baseline Netflix provides},
  www_section  = {Differential Privacy, Netflix, recommender systems},
  isbn         = {978-1-60558-495-9},
  doi          = {10.1145/1557019.1557090},
  url          = {http://doi.acm.org/10.1145/1557019.1557090},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecommender2009McSherry.pdf},
}
2009_4
@conference{2009_4,
  title = {Enhancing Application-Layer Multicast Solutions by Wireless Underlay Support}, 
  author = {H{\"u}bsch, Christian and Oliver Waldhorst}, 
  booktitle = {Kommunikation in Verteilten Systemen (KiVS) 2009, Kassel, Germany}, 
  year = {2009}, 
  abstract = {Application Layer Multicast (ALM) is an attractive solution to overcome the
        deployment problems of IP-Multicast. We show how to cope with the challenges of
        incorporating wireless devices into ALM protocols. As a first approach we extend
        the NICE protocol, significantly increasing its performance in scenarios with
        many devices connected through wireless LAN}, 
  www_section = {multicast}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.143.2935}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nice-wli.pdf}, 
}
2009_5
@mastersthesis{2009_5,
  title       = {Evaluation of Current P2P-SIP Proposals with Respect to the Igor/SSR API},
  author      = {Markus Bucher},
  school      = {Technische Universit{\"a}t M{\"u}nchen},
  volume      = {Computer Science},
  year        = {2009},
  address     = {Munich, Germany},
  type        = {Diplomarbeit},
  www_section = {Unsorted},
  url         = {https://bibliography.gnunet.org},
}
2009_6
@mastersthesis{2009_6,
  title       = {Monte-Carlo Search Techniques in the Modern Board Game Thurn and Taxis},
  author      = {Frederik Christiaan Schadd},
  school      = {Maastricht University},
  volume      = {Master Science of Artificial Intelligence},
  year        = {2009},
  month       = {December},
  address     = {Maastricht, Netherlands},
  pages       = {0--93},
  type        = {Master Thesis},
  abstract    = {Modern board games present a new and challenging field when researching
        search techniques in the field of Artificial Intelligence. These games differ to
        classic board games, such as chess, in that they can be non-deterministic, have
        imperfect information or more than two players. While tree-search approaches,
        such as alpha-beta pruning, have been quite successful in playing classic board
        games, by for instance defeating the then reigning world champion Gary Kasparov
        in Chess, these techniques are not as effective when applied to modern board
        games. This thesis investigates the effectiveness of Monte-Carlo Tree Search when
        applied to a modern board game, for which the board game Thurn and Taxis was
        used. This is a non-deterministic modern board game with imperfect information
        that can be played with more than 2 players, and is hence suitable for research.
        First, the state-space and game-tree complexities of this game are computed, from
        which the conclusion can be drawn that the two-player version of the game has a
        complexity similar to the game Shogi. Several techniques are investigated in
        order to improve the sampling process, for instance by adding domain knowledge.
        Given the results of the experiments, one can conclude that Monte-Carlo Tree
        Search gives a slight performance increase over standard Monte-Carlo search. In
        addition, the most effective improvements appeared to be the application of
        pseudo-random simulations and limiting simulation lengths, while other techniques
        have been shown to be less effective or even ineffective. Overall, when applying
        the best performing techniques, an AI with advanced playing strength has been
        created, such that further research is likely to push this performance to a
        strength of expert level},
  www_section = {artificial intelligence, MCTS, modern board game, Monte-Carlo Tree Search,
        search techniques},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Thesis\%20-\%20F.Schadd.pdf},
  url         = {https://bibliography.gnunet.org},
}
2009_7
@book{2009_7,
  title = {Multi Party Distributed Private Matching, Set Disjointness and Cardinality of
        Set Intersection with Information Theoretic Security}, 
  author = {Sathya Narayanan, G. and Aishwarya, T. and Agrawal, Anugrah and Patra, Arpita
        and Choudhary, Ashish and Pandu Rangan, C.}, 
  booktitle = {Cryptology and Network Security}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {5888}, 
  year = {2009}, 
  pages = {21--40}, 
  editor = {Garay, Juan A. and Miyaji, Atsuko and Otsuka, Akira}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In this paper, we focus on the specific problems of Private Matching, Set
        Disjointness and Cardinality of Set Intersection in information theoretic
        settings. Specifically, we give perfectly secure protocols for the above problems
        in n party settings, tolerating a computationally unbounded semi-honest
        adversary, who can passively corrupt at most t < n/2 parties. To the best of our
        knowledge, these are the first such information theoretically secure protocols in
        a multi-party setting for all the three problems. Previous solutions for
        Distributed Private Matching and Cardinality of Set Intersection were
        cryptographically secure and the previous Set Disjointness solution, though
        information theoretically secure, is in a two party setting. We also propose a
        new model for Distributed Private matching which is relevant in a multi-party
        setting}, 
  www_section = {Multiparty Computation, Privacy preserving Set operations}, 
  isbn = {978-3-642-10432-9}, 
  doi = {10.1007/978-3-642-10433-6_2}, 
  url = {http://dx.doi.org/10.1007/978-3-642-10433-6_2}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MultiParty2009Narayanan.pdf},
}
2009_8
@book{2009_8,
  title = {An Optimally Fair Coin Toss}, 
  author = {Moran, Tal and Naor, Moni and Segev, Gil}, 
  booktitle = {Theory of Cryptography}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {5444}, 
  year = {2009}, 
  pages = {1--18}, 
  editor = {Reingold, Omer}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We address one of the foundational problems in cryptography: the bias of
        coin-flipping protocols. Coin-flipping protocols allow mutually distrustful
        parties to generate a common unbiased random bit, guaranteeing that even if one
        of the parties is malicious, it cannot significantly bias the output of the
        honest party. A classical result by Cleve [STOC '86] showed that for any
        two-party r-round coin-flipping protocol there exists an efficient adversary that
        can bias the output of the honest party by $\Omega(1/r)$. However, the best
        previously known protocol only guarantees $O(1/\sqrt{r})$ bias, and the question
        of whether Cleve's bound is tight has remained open for more than twenty years.
        In this paper we establish the optimal trade-off between the round complexity and
        the bias of two-party coin-flipping protocols. Under standard assumptions (the
        existence of oblivious transfer), we show that Cleve's lower bound is tight: we
        construct an r-round protocol with bias O(1/r)}, 
  isbn = {978-3-642-00456-8}, 
  doi = {10.1007/978-3-642-00457-5_1}, 
  url = {http://dx.doi.org/10.1007/978-3-642-00457-5_1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OptimallyFairCoinToss2009Moran.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
2009_9
@conference{2009_9,
  title       = {Optimization of distributed services with UNISONO},
  author      = {unknown},
  booktitle   = {GI/ITG KuVS Fachgespr{\"a}ch NGN Service Delivery Platforms \& Service
        Overlay Networks},
  year        = {2009},
  abstract    = {Distributed services are a special case of P2P networks where nodes have
        several distinctive tasks. Based on previous work, we show how UNISONO provides a
        way to optimize these services to increase performance, efficiency and user
        experience. UNISONO is a generic framework for host-based distributed network
        measurements. In this talk, we present UNISONO as an Enabler for self-organizing
        Service Delivery Plattforms. We give a short overview of the UNISONO concept and
        show how distributed services benefit from its usage},
  www_section = {distributed systems, P2P},
  url         = {http://www.net.in.tum.de/de/mitarbeiter/holz/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/unisono_kuvs-ngn.pdf},
}
2010_0
@conference{2010_0,
  title       = {Application of Random Walks to Decentralized Recommender Systems},
  author      = {Anne-Marie Kermarrec and Vincent Leroy and Afshin Moin and Christopher
        Thraves},
  booktitle   = {14th International Conference on Principles of Distributed Systems},
  year        = {2010},
  month       = {September},
  www_section = {random walks, recommender system},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/opodis10_HAL.pdf},
  url         = {https://bibliography.gnunet.org},
}
2010_1
@article{2010_1,
  title       = {The Ariba Framework for Application Development using Service Overlays},
  author      = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst},
  journal     = {Praxis der Informationsverarbeitung und Kommunikation},
  volume      = {33},
  year        = {2010},
  pages       = {7--11},
  abstract    = {Developing new network services in the Internet is complex and costly. This
        high entrance barrier has prevented new innovation in the network itself, and
        stuck the Internet as being mainly browser-based client/server systems.
        End-system based decentralized services are cheaper, but have a complexity
        several orders of magnitude higher than centralized systems in terms of structure
        and protocols. To foster development of such decentralized network services, we
        present the ariba framework. We show how ariba can facilitate development of
        end-system based decentralized services through self-organizing service
        overlays--flexibly deployed purely on end-systems without the need for costly
        infrastructure},
  www_section = {overlay networks},
  issn        = {1865-8342},
  doi         = {10.1515/piko.2010.003},
  url         = {http://www.reference-global.com/doi/abs/10.1515/piko.2010.003},
}
2010_10
@conference{2010_10,
  title        = {Private Record Matching Using Differential Privacy},
  author       = {Inan, Ali and Kantarcioglu, Murat and Ghinita, Gabriel and Bertino, Elisa},
  booktitle    = {Proceedings of the 13th International Conference on Extending Database
        Technology},
  organization = {ACM},
  year         = {2010},
  address      = {New York, NY, USA},
  publisher    = {ACM},
  abstract     = {Private matching between datasets owned by distinct parties is a challenging
        problem with several applications. Private matching allows two parties to
        identify the records that are close to each other according to some distance
        functions, such that no additional information other than the join result is
        disclosed to any party. Private matching can be solved securely and accurately
        using secure multi-party computation (SMC) techniques, but such an approach is
        prohibitively expensive in practice. Previous work proposed the release of
        sanitized versions of the sensitive datasets which allows blocking, i.e.,
        filtering out sub-sets of records that cannot be part of the join result. This
        way, SMC is applied only to a small fraction of record pairs, reducing the
        matching cost to acceptable levels. The blocking step is essential for the
        privacy, accuracy and efficiency of matching. However, the state-of-the-art
        focuses on sanitization based on k-anonymity, which does not provide sufficient
        privacy. We propose an alternative design centered on differential privacy, a
        novel paradigm that provides strong privacy guarantees. The realization of the
        new model presents difficult challenges, such as the evaluation of distance-based
        matching conditions with the help of only a statistical queries interface.
        Specialized versions of data indexing structures (e.g., kd-trees) also need to be
        devised, in order to comply with differential privacy. Experiments conducted on
        the real-world Census-income dataset show that, although our methods provide
        strong privacy, their effectiveness in reducing matching cost is not far from
        that of k-anonymity based counterparts},
  www_section  = {Differential Privacy, privacy, record matching, security},
  isbn         = {978-1-60558-945-9},
  doi          = {10.1145/1739041.1739059},
  url          = {http://doi.acm.org/10.1145/1739041.1739059},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateRecordMatching2010Inan.pdf},
}
2010_11
@conference{2010_11,
  title        = {On Runtime Adaptation of Application-Layer Multicast Protocol Parameters},
  author       = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst},
  booktitle    = {Proceedings of Networked Services and Applications -- Engineering, Control
        and Management (EUNICE)},
  organization = {Springer},
  year         = {2010},
  address      = {Trondheim, Norway},
  publisher    = {Springer},
  note         = {to appear},
  series       = {Lecture Notes in Computer Science},
  url          = {http://www.tm.uni-karlsruhe.de/itm/WebMan/view.php?view=publikationen_detail\&id=389\&lang=en},
  www_section  = {Unsorted},
}
2010_12
@conference{2010_12,
  title = {Scalable Application-Layer Multicast Simulations with OverSim}, 
  author = {Stephan Krause and H{\"u}bsch, Christian}, 
  booktitle = {7th Annual IEEE Consumer Communications \& Networking Conference}, 
  year = {2010}, 
  abstract = {Application-Layer Multicast has become a promising class of protocols since
        IP Multicast has not found wide area deployment in the Internet. Developing such
        protocols requires in-depth analysis of their properties even with large numbers
        of participants---a characteristic which is at best hard to achieve in real
        network experiments. Several well-known simulation frameworks have been developed
        and used in recent years, but none has proved to be fitting the requirements for
        analyzing large-scale application-layer networks. In this paper we propose the
        OverSim framework as a promising simulation environment for scalable
        Application-Layer Multicast research. We show that OverSim is able to manage even
        overlays with several thousand participants in short time while consuming
        comparably little memory. We compare the framework's runtime properties with the
        two exemplary Application-Layer Multicast protocols Scribe and NICE. The results
        show that both simulation time and memory consumption grow linearly with the
        number of nodes in highly feasible dimensions}, 
  www_section = {multicast, NICE, OverSim, Scribe}, 
  url = {https://bibliography.gnunet.org}, 
}
2010_13
@conference{2010_13,
  title        = {User-perceived Performance of the NICE Application Layer Multicast Protocol in
        Large and Highly Dynamic Groups},
  author       = {H{\"u}bsch, Christian and Mayer, Christoph P. and Oliver Waldhorst},
  booktitle    = {Proceedings of 15th International GI/ITG Conference on "Measurement,
        Modelling and Evaluation of Computing Systems"},
  organization = {Springer Berlin, Heidelberg},
  year         = {2010},
  month        = {January},
  address      = {Essen, Germany},
  pages        = {62--77},
  publisher    = {Springer Berlin, Heidelberg},
  note         = {Best Paper Award},
  abstract     = {The presentation of a landmark paper by Chu et al. at SIGMETRICS 2000
        introduced application layer multicast (ALM) as completely new area of network
        research. Many researchers have since proposed ALM protocols, and have shown that
        these protocols only put a small burden on the network in terms of link-stress
        and -stretch. However, since the network is typically not a bottleneck, user
        acceptance remains the limiting factor for the deployment of ALM. In this paper
        we present an in-depth study of the user-perceived performance of the NICE ALM
        protocol. We use the OverSim simulation framework to evaluate delay experienced
        by a user and bandwidth consumption on the user's access link in large multicast
        groups and under aggressive churn models. Our major results are (1) latencies
        grow moderate with increasing number of nodes as clusters get optimized, (2) join
        delays get optimized over time, and (3) despite being a tree-dissemination
        protocol NICE handles churn surprisingly well when adjusting heartbeat intervals
        accordingly. We conclude that NICE comes up to the user's expectations even for
        large groups and under high churn. This work was partially funded as part of the
        Spontaneous Virtual Networks (SpoVNet) project by the Landesstiftung
        Baden-W{\"u}rttemberg within the BW-FIT program and as part of the Young
        Investigator Group Controlling Heterogeneous and Dynamic Mobile Grid and
        Peer-to-Peer Systems (CoMoGriP) by the Concept for the Future of Karlsruhe
        Institute of Technology (KIT) within the framework of the German Excellence
        Initiative},
  isbn         = {978-3-642-12103-6},
  doi          = {10.1007/978-3-642-12104-3},
  url          = {http://www.springerlink.com/content/t6k421560103540n/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/User-Perceived\%20Performance\%20of\%20the\%20NICE\%20Application\%20Layer\%20Multicast\%20Protocol\%20in\%20Large\%20and\%20Highly\%20Dynamic\%20Groups_1.pdf},
  www_section  = {Unsorted},
}
2010_14
@conference{2010_14,
  title       = {Using Legacy Applications in Future Heterogeneous Networks with ariba},
  author      = {H{\"u}bsch, Christian and Mayer, Christoph P. and Sebastian Mies and Roland
        Bless and Oliver Waldhorst and Martina Zitterbart},
  booktitle   = {Proceedings of IEEE INFOCOM},
  year        = {2010},
  address     = {San Diego, CA, USA},
  note        = {Demo},
  url         = {https://bibliography.gnunet.org},
  www_section = {Unsorted},
}
2010_2
@conference{2010_2,
  title        = {Autonomous NAT Traversal},
  author       = {Andreas M{\"u}ller and Nathan S Evans and Christian Grothoff and Samy Kamkar},
  booktitle    = {10th IEEE International Conference on Peer-to-Peer Computing (IEEE P2P'10)},
  organization = {IEEE},
  year         = {2010},
  address      = {Delft, The Netherlands},
  publisher    = {IEEE},
  abstract     = {Traditional NAT traversal methods require the help of a third party for
        signalling. This paper investigates a new autonomous method for establishing
        connections to peers behind NAT. The proposed method for Autonomous NAT traversal
        uses fake ICMP messages to initially contact the NATed peer. This paper presents
        how the method is supposed to work in theory, discusses some possible variations,
        introduces various concrete implementations of the proposed approach and
        evaluates empirical results of a measurement study designed to evaluate the
        efficacy of the idea in practice},
  www_section  = {GNUnet, ICMP, NAT, P2P},
  url          = {http://grothoff.org/christian/pwnat.pdf},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/pwnat.pdf},
}
2010_3
@mastersthesis{2010_3,
  title       = {Developing Peer-to-Peer Web Applications},
  author      = {Toni Ruottu},
  school      = {University of Helsinki},
  volume      = {M.S},
  year        = {2010},
  month       = {September},
  address     = {Helsinki},
  pages       = {0--66},
  type        = {Master's Thesis},
  abstract    = {As the virtual world grows more complex, finding a standard way for storing
        data becomes increasingly important. Ideally, each data item would be brought
        into the computer system only once. References for data items need to be
        cryptographically verifiable, so the data can maintain its identity while being
        passed around. This way there will be only one copy of the users family photo
        album, while the user can use multiple tools to show or manipulate the album.
        Copies of users data could be stored on some of his family members computer, some
        of his computers, but also at some online services which he uses. When all actors
        operate over one replicated copy of the data, the system automatically avoids a
        single point of failure. Thus the data will not disappear with one computer
        breaking, or one service provider going out of business. One shared copy also
        makes it possible to delete a piece of data from all systems at once, on users
        request. In our research we tried to find a model that would make data manageable
        to users, and make it possible to have the same data stored at various locations.
        We studied three systems, Persona, Freenet, and GNUnet, that suggest different
        models for protecting user data. The main application areas of the systems
        studied include securing online social networks, providing anonymous web, and
        preventing censorship in file-sharing. Each of the systems studied store user
        data on machines belonging to third parties. The systems differ in measures they
        take to protect their users from data loss, forged information, censorship, and
        being monitored. All of the systems use cryptography to secure names used for the
        content, and to protect the data from outsiders. Based on the gained knowledge,
        we built a prototype platform called Peerscape, which stores user data in a
        synchronized, protected database. Data items themselves are protected with
        cryptography against forgery, but not encrypted as the focus has been
        disseminating the data directly among family and friends instead of letting third
        parties store the information. We turned the synchronizing database into
        peer-to-peer web by revealing its contents through an integrated http server. The
        REST-like http API supports development of applications in javascript. To
        evaluate the platform's suitability for application development we wrote some
        simple applications, including a public chat room, bittorrent site, and a flower
        growing game. During our early tests we came to the conclusion that using the
        platform for simple applications works well. As web standards develop further,
        writing applications for the platform should become easier. Any system this
        complex will have its problems, and we are not expecting our platform to replace
        the existing web, but are fairly impressed with the results and consider our work
        important from the perspective of managing user data},
  www_section = {content centric, ECRS, Freenet, GNUnet, P2P, Peerscape, Persona},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/twr-dp2pwa.pdf},
  url         = {https://bibliography.gnunet.org},
}
2010_4
@article{2010_4,
  title = {On the Difficulties of Disclosure Prevention in Statistical Databases or The
        Case for Differential Privacy}, 
  author = {Cynthia Dwork and Moni Naor}, 
  journal = {Journal of Privacy and Confidentiality}, 
  volume = {2}, 
  year = {2010}, 
  pages = {93--107}, 
  abstract = {In 1977 Tore Dalenius articulated a desideratum for statistical databases:
        nothing about an individual should be learnable from the database that cannot be
        learned without access to the database. We give a general impossibility result
        showing that a natural formalization of Dalenius' goal cannot be achieved if the
        database is useful. The key obstacle is the side information that may be
        available to an adversary. Our results hold under very general conditions
        regarding the database, the notion of privacy violation, and the notion of
        utility. Contrary to intuition, a variant of the result threatens the privacy
        even of someone not in the database. This state of affairs motivated the notion
        of differential privacy [15, 16], a strong ad omnia privacy which, intuitively,
        captures the increased risk to one's privacy incurred by participating in a
        database}, 
  url = {http://research.microsoft.com/apps/pubs/default.aspx?id=135704}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DisclousrePrevention2010Dwork.pdf},
  www_section = {Unsorted}, 
}

2010_5
@inproceedings{2010_5,
  title = {Drac: An Architecture for Anonymous Low-Volume Communications}, 
  author = {Danezis, George and Diaz, Claudia and Troncoso, Carmela and Laurie, Ben}, 
  booktitle = {Privacy Enhancing Technologies}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {6205}, 
  year = {2010}, 
  pages = {202--219}, 
  editor = {Atallah, Mikhail J. and Hopper, Nicholas J.}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  www_section = {anonymous communication, anonymous IM, anonymous voice, Drac, F2F}, 
  isbn = {978-3-642-14526-1}, 
  doi = {10.1007/978-3-642-14527-8_12}, 
  url = {http://dx.doi.org/10.1007/978-3-642-14527-8_12}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/drac-pet2010.pdf}, 
}
2010_6
@inproceedings{2010_6,
  title = {Efficient DHT attack mitigation through peers' ID distribution}, 
  author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier}, 
  booktitle = {HOTP2P'10--International Workshop on Hot Topics in Peer-to-Peer Systems}, 
  year = {2010}, 
  month = apr, 
  address = {Atlanta, Georgia, USA}, 
  abstract = {We present a new solution to protect the widely deployed KAD DHT against
        localized attacks which can take control over DHT entries. We show through
        measurements that the IDs distribution of the best peers found after a lookup
        process follows a geometric distribution. We then use this result to detect DHT
        attacks by comparing real peers' ID distributions to the theoretical one thanks
        to the Kullback-Leibler divergence. When an attack is detected, we propose
        countermeasures that progressively remove suspicious peers from the list of
        possible contacts to provide a safe DHT access. Evaluations show that our method
        detects the most efficient attacks with a very small false-negative rate, while
        countermeasures successfully filter almost all malicious peers involved in an
        attack. Moreover, our solution completely fits the current design of the KAD
        network and introduces no network overhead}, 
  www_section = {attack detection, attack mitigation, distributed hash table, IDs
        distribution, KAD, Sybil attack}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotP2P\%2710\%20-\%20KAD\%20DHT\%20attack\%20mitigation.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2010_7
@inproceedings{2010_7,
  title = {How Accurately Can One's Interests Be Inferred from Friends?}, 
  author = {Wen, Zhen and Lin, Ching-Yung}, 
  booktitle = {Proceedings of the 19th International Conference on World Wide Web}, 
  organization = {ACM}, 
  year = {2010}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {Search and recommendation systems must effectively model user interests in
        order to provide personalized results. The proliferation of social software makes
        social network an increasingly important source for user interest modeling,
        because of the social influence and correlation among friends. However, there are
        large variations in people's contribution of social content. Therefore, it is
        impractical to accurately model interests for all users. As a result,
        applications need to decide whether to utilize a user interest model based on its
        accuracy. To address this challenge, we present a study on the accuracy of user
        interests inferred from three types of social content: social bookmarking, file
        sharing, and electronic communication, in an organizational social network within
        a large-scale enterprise. First, we demonstrate that combining different types of
        social content to infer user interests outperforms methods that use only one type
        of social content. Second, we present a technique to predict the inference
        accuracy based on easily observed network characteristics, including user
        activeness, network in-degree, out-degree, and betweenness centrality}, 
  www_section = {accuracy, social networks, user modeling}, 
  isbn = {978-1-60558-799-8}, 
  doi = {10.1145/1772690.1772875}, 
  url = {http://doi.acm.org/10.1145/1772690.1772875}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/InterestsInference2010Wen.pdf},
}
2010_8
@phdthesis{2010_8,
  title = {Incentive-driven QoS in peer-to-peer overlays}, 
  author = {Raul Leonardo Landa Gamiochipi}, 
  school = {University College London}, 
  year = {2010}, 
  month = may, 
  address = {London}, 
  pages = {0--209}, 
  abstract = {A well known problem in peer-to-peer overlays is that no single entity has
        control over the software, hardware and configuration of peers. Thus, each peer
        can selfishly adapt its behaviour to maximise its benefit from the overlay. This
        thesis is concerned with the modelling and design of incentive mechanisms for
        QoS-overlays: resource allocation protocols that provide strategic peers with
        participation incentives, while at the same time optimising the performance of
        the peer-to-peer distribution overlay. The contributions of this thesis are as
        follows. First, we present PledgeRoute, a novel contribution accounting system
        that can be used, along with a set of reciprocity policies, as an incentive
        mechanism to encourage peers to contribute resources even when users are not
        actively consuming overlay services. This mechanism uses a decentralised credit
        network, is resilient to sybil attacks, and allows peers to achieve time and
        space deferred contribution reciprocity. Then, we present a novel, QoS-aware
        resource allocation model based on Vickrey auctions that uses PledgeRoute as a
        substrate. It acts as an incentive mechanism by providing efficient overlay
        construction, while at the same time allocating increasing service quality to
        those peers that contribute more to the network. The model is then applied to
        lagsensitive chunk swarming, and some of its properties are explored for
        different peer delay distributions. When considering QoS overlays deployed over
        the best-effort Internet, the quality received by a client cannot be adjudicated
        completely to either its serving peer or the intervening network between them. By
        drawing parallels between this situation and well-known hidden action situations
        in microeconomics, we propose a novel scheme to ensure adherence to advertised
        QoS levels. We then apply it to delay-sensitive chunk distribution overlays and
        present the optimal contract payments required, along with a method for QoS
        contract enforcement through reciprocative strategies. We also present a
        probabilistic model for application-layer delay as a function of the prevailing
        network conditions. Finally, we address the incentives of managed overlays, and
        the prediction of their behaviour. We propose two novel models of multihoming
        managed overlay incentives in which overlays can freely allocate their traffic
        flows between different ISPs. One is obtained by optimising an overlay utility
        function with desired properties, while the other is designed for data-driven
        least-squares fitting of the cross elasticity of demand. This last model is then
        used to solve for ISP profit maximisation}, 
  www_section = {BitTorrent, Freeloading, game theory, incentives, PeerLive, prices, QoS}, 
  url = {http://eprints.ucl.ac.uk/19490/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/19490.pdf}, 
}
2010_9
@article{2010_9,
  title = {Malugo: A peer-to-peer storage system}, 
  author = {Chan, Yu-Wei and Ho, Tsung-Hsuan and Shih, Po-Chi and Chung, Yeh-Ching}, 
  journal = {International Journal of Ad Hoc and Ubiquitous Computing}, 
  volume = {5}, 
  number = {4}, 
  year = {2010}, 
  abstract = {We consider the problem of routing locality in peer-to-peer storage systems
        where peers store and exchange data among themselves. With the global
        information, peers will take the data locality into consideration when they
        implement their replication mechanisms to keep a number of file replicas all over
        the systems. In this paper, we mainly propose a peer-to-peer storage
        system--Malugo. Algorithms for the implementation of the peers' locating and file
        operation processes are also presented. Simulation results show that the proposed
        system successfully constructs an efficient and stable peer-to-peer storage
        environment with considerations of data and routing locality among peers}, 
  www_section = {distributed storage, Malugo, peer-to-peer storage}, 
  doi = {10.1504/IJAHUC.2010.032995}, 
  url = {http://www.ingentaconnect.com/content/ind/ijahuc/2010/00000005/00000004/art00002},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Malugo.pdf}, 
}
2011_0
@article{2011_0,
  title = {Collaborative Personalized Top-k Processing}, 
  author = {Bai, Xiao and Guerraoui, Rachid and Kermarrec, Anne-Marie and Leroy, Vincent}, 
  journal = {ACM Transactions on Database Systems}, 
  volume = {36}, 
  year = {2011}, 
  pages = {26:1--26:38}, 
  abstract = {This article presents P4Q, a fully decentralized gossip-based protocol to
        personalize query processing in social tagging systems. P4Q dynamically
        associates each user with social acquaintances sharing similar tagging behaviors.
        Queries are gossiped among such acquaintances, computed on-the-fly in a
        collaborative, yet partitioned manner, and results are iteratively refined and
        returned to the querier. Analytical and experimental evaluations convey the
        scalability of P4Q for top-k query processing, as well its inherent ability to
        cope with users updating profiles and departing}, 
  www_section = {gossip, Peer-to-peer networks, Personalization, top-k processing}, 
  issn = {0362-5915}, 
  doi = {10.1145/2043652.2043659}, 
  url = {http://doi.acm.org/10.1145/2043652.2043659}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TopK-Processing2011Bai.pdf},
}
2011_1
@techreport{2011_1,
  title = {A comprehensive study of Convergent and Commutative Replicated Data Types}, 
  author = {Marc Shapiro and Nuno Preguica and Carlos Baquero and Marek Zawirski}, 
  institution = {INRIA Rocquencourt}, 
  number = {7506}, 
  year = {2011}, 
  month = jan, 
  address = {Le Chesnay Cedex}, 
  abstract = {Eventual consistency aims to ensure that replicas of some mutable shared
        object converge without foreground synchronisation. Previous approaches to
        eventual consistency are ad-hoc and error-prone. We study a principled
        approach: to base the design of shared data types on some simple formal
        conditions that are sufficient to guarantee eventual consistency. We call these
        types Convergent or Commutative Replicated Data Types (CRDTs). This paper
        formalises asynchronous object replication, either state based or operation
        based, and provides a sufficient condition appropriate for each case. It
        describes several useful CRDTs, including container data types supporting both
        add and remove operations with clean semantics, and more complex types such as
        graphs, monotonic DAGs, and sequences. It discusses some properties needed to
        implement non-trivial CRDTs}, 
  www_section = {commutative operations, data replication, optimistic replication}, 
  issn = {0249-6399}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crdt.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2011_11
@inproceedings{2011_11,
  title = {On the Relation Between Differential Privacy and Quantitative Information Flow}, 
  author = {Alvim, M{\'a}rio S. and Andr{\'e}s, Miguel E.}, 
  booktitle = {Proceedings of the 38th International Conference on Automata, Languages and
        Programming--Volume Part II}, 
  organization = {Springer-Verlag}, 
  year = {2011}, 
  address = {Berlin, Heidelberg}, 
  publisher = {Springer-Verlag}, 
  abstract = {Differential privacy is a notion that has emerged in the community of
        statistical databases, as a response to the problem of protecting the privacy of
        the database's participants when performing statistical queries. The idea is that
        a randomized query satisfies differential privacy if the likelihood of obtaining
        a certain answer for a database x is not too different from the likelihood of
        obtaining the same answer on adjacent databases, i.e. databases which differ from
        x for only one individual. Information flow is an area of Security concerned with
        the problem of controlling the leakage of confidential information in programs
        and protocols. Nowadays, one of the most established approaches to quantify and
        to reason about leakage is based on the R{\'e}nyi min entropy version of
        information theory. In this paper, we analyze critically the notion of
        differential privacy in light of the conceptual framework provided by the
        R{\'e}nyi min information theory. We show that there is a close relation between
        differential privacy and leakage, due to the graph symmetries induced by the
        adjacency relation. Furthermore, we consider the utility of the randomized
        answer, which measures its expected degree of accuracy. We focus on certain kinds
        of utility functions called {\textquotedblleft}binary{\textquotedblright}, which
        have a close correspondence with the R{\'e}nyi min mutual information. Again, it
        turns out that there can be a tight correspondence between differential privacy
        and utility, depending on the symmetries induced by the adjacency relation and by
        the query. Depending on these symmetries we can also build an optimal-utility
        randomization mechanism while preserving the required level of differential
        privacy. Our main contribution is a study of the kind of structures that can be
        induced by the adjacency relation and the query, and how to use them to derive
        bounds on the leakage and achieve the optimal utility}, 
  isbn = {978-3-642-22011-1}, 
  url = {http://dl.acm.org/citation.cfm?id=2027223.2027228}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DifferentialPrivacy2011Alvim.pdf},
  www_section = {Unsorted}, 
}
2011_12
@inproceedings{2011_12,
  title = {Scalability \& Paranoia in a Decentralized Social Network}, 
  author = {Carlo v. Loesch and Gabor X Toth and Mathias Baumann}, 
  booktitle = {Federated Social Web}, 
  year = {2011}, 
  month = jun, 
  address = {Berlin, Germany}, 
  abstract = {There's a lot of buzz out there about "replacing" Facebook with a
        privacy-enhanced, decentralized, ideally open source something. In this talk
        we'll focus on how much privacy we should plan for (specifically about how we
        cannot entrust our privacy to modern virtual machine technology) and the often
        underestimated problem of getting such a monster network to function properly.
        These issues can be considered together or separately: Even if you're not as
        concerned about privacy as we are, the scalability problem still persists}, 
  www_section = {GNUnet, privacy, social networks}, 
  url = {https://secushare.org/2011-FSW-Scalability-Paranoia}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2011-FSW-Scalability-Paranoia.pdf},
}
2011_13
@article{2011_13,
  title = {Secure collaborative supply chain planning and inverse optimization--The JELS
        model}, 
  author = {Richard Pibernik and Yingying Zhang and Florian Kerschbaum and Axel
        Schr{\"o}pfer}, 
  journal = {European Journal of Operational Research}, 
  volume = {208}, 
  year = {2011}, 
  month = jan, 
  pages = {75--85}, 
  abstract = {It is a well-acknowledged fact that collaboration between different members
        of a supplychain yields a significant potential to increase overall supplychain
        performance. Sharing private information has been identified as prerequisite for
        collaboration and, at the same time, as one of its major obstacles. One potential
        avenue for overcoming this obstacle is Secure Multi-Party Computation (SMC). SMC
        is a cryptographic technique that enables the computation of any (well-defined)
        mathematical function by a number of parties without any party having to disclose
        its input to another party. In this paper, we show how SMC can be successfully
        employed to enable joint decision-making and benefit sharing in a simple
        supplychain setting. We develop secure protocols for implementing the well-known
        {\textquotedblleft}Joint Economic Lot Size (JELS) Model{\textquotedblright} with
        benefit sharing in such a way that none of the parties involved has to disclose
        any private (cost and capacity) data. Thereupon, we show that although
        computation of the model's outputs can be performed securely, the approach still
        faces practical limitations. These limitations are caused by the potential of
        {\textquotedblleft}inverseoptimization{\textquotedblright}, i.e., a party can
        infer another party's private data from the output of a collaborativeplanning
        scheme even if the computation is performed in a secure fashion. We provide a
        detailed analysis of {\textquotedblleft}inverseoptimization{\textquotedblright}
        potentials and introduce the notion of {\textquotedblleft}stochastic
        security{\textquotedblright}, a novel approach to assess the additional
        information a party may learn from joint computation and benefit sharing. Based
        on our definition of {\textquotedblleft}stochastic security{\textquotedblright}
        we propose a stochastic benefit sharing rule, develop a secure protocol for this
        benefit sharing rule, and assess under which conditions stochastic benefit
        sharing can guarantee secure collaboration}, 
  www_section = {collaboration, information sharing, secure multi-party computation, SMC,
        supplychain management}, 
  doi = {10.1016/j.ejor.2010.08.018}, 
  url = {http://www.sciencedirect.com/science/article/pii/S0377221710005552}, 
}
2011_14
@inproceedings{2011_14,
  title = {Social Market: Combining Explicit and Implicit Social Networks}, 
  author = {Frey, Davide and J{\'e}gou, Arnaud and Kermarrec, Anne-Marie}, 
  booktitle = {Stabilization, Safety, and Security of Distributed Systems}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {6976}, 
  year = {2011}, 
  pages = {193--207}, 
  editor = {D{\'e}fago, Xavier and Petit, Franck and Villain, Vincent}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {The pervasiveness of the Internet has led research and applications to focus
        more and more on their users. Online social networks such as Facebook provide
        users with the ability to maintain an unprecedented number of social connections.
        Recommendation systems exploit the opinions of other users to suggest movies or
        products based on our similarity with them. This shift from machines to users
        motivates the emergence of novel applications and research challenges. In this
        paper, we embrace the social aspects of the Web 2.0 by considering a novel
        problem. We build a distributed social market that combines interest-based social
        networks with explicit networks like Facebook. Our Social Market (SM) allows
        users to identify and build connections to other users that can provide
        interesting goods, or information. At the same time, it backs up these
        connections with trust, by associating them with paths of trusted users that
        connect new acquaintances through the explicit network. This convergence of
        implicit and explicit networks yields TAPS, a novel gossip protocol that can be
        applied in applications devoted to commercial transactions, or to add robustness
        to standard gossip applications like dissemination or recommendation systems}, 
  isbn = {978-3-642-24549-7}, 
  doi = {10.1007/978-3-642-24550-3_16}, 
  url = {http://dx.doi.org/10.1007/978-3-642-24550-3_16}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SocialMarket2011Frey.pdf},
  www_section = {Unsorted}, 
}
2011_15
@inproceedings{2011_15,
  title = {SWIRL: A Scalable Watermark to Detect Correlated Network Flows}, 
  author = {Amir Houmansadr and Borisov, Nikita}, 
  booktitle = {NDSS'11--Proceedings of the Network and Distributed Security Symposium}, 
  year = {2011}, 
  month = feb, 
  address = {San Diego, CA, USA}, 
  abstract = {Flow watermarks are active traffic analysis techniques that help establish a
        causal connection between two network flows by content-independent manipulations,
        e.g., altering packet timings. Watermarks provide a much more scalable approach
        for flow correlation than passive traffic analysis. Previous designs of scalable
        watermarks, however, were subject to multi-flow attacks. They also introduced
        delays too large to be used in most environments. We design SWIRL, a Scalable
        Watermark that is Invisible and Resilient to packet Losses. SWIRL is the first
        watermark that is practical to use for large-scale traffic analysis. SWIRL uses a
        flow-dependent approach to resist multi-flow attacks, marking each flow with a
        different pattern. SWIRL is robust to packet losses and network jitter, yet it
        introduces only small delays that are invisible to both benign users and
        determined adversaries. We analyze the performance of SWIRL both analytically and
        on the PlanetLab testbed, demonstrating very low error rates. We consider
        applications of SWIRL to stepping stone detection and linking anonymous
        communication. We also propose a novel application of watermarks to defend
        against congestion attacks on Tor}, 
  www_section = {anonymity, SWIRL, traffic analysis, watermarking}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NDSS11-2.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2011_16
@inproceedings{2011_16,
  title = {{\textquotedblleft}You Might Also Like:{\textquotedblright} Privacy Risks of Collaborative Filtering}, 
  author = {Calandrino, J.A. and Kilzer, A. and Narayanan, A. and Felten, E.W. and
        Shmatikov, V.}, 
  booktitle = {Security and Privacy (SP), 2011 IEEE Symposium on}, 
  year = {2011}, 
  month = may, 
  abstract = {Many commercial websites use recommender systems to help customers locate
        products and content. Modern recommenders are based on collaborative filtering:
        they use patterns learned from users' behavior to make recommendations, usually
        in the form of related-items lists. The scale and complexity of these systems,
        along with the fact that their outputs reveal only relationships between items
        (as opposed to information about users), may suggest that they pose no meaningful
        privacy risk. In this paper, we develop algorithms which take a moderate amount
        of auxiliary information about a customer and infer this customer's transactions
        from temporal changes in the public outputs of a recommender system. Our
        inference attacks are passive and can be carried out by any Internet user. We
        evaluate their feasibility using public data from popular websites Hunch,
        Last.fm, LibraryThing, and Amazon}, 
  www_section = {accuracy, Amazon, collaboration, collaborative filtering, commercial Web
        sites, consumer behaviour, Covariance matrix, customer transactions, data
        privacy, groupware, History, Hunch, Inference algorithms, inference attacks,
        inference mechanisms, information filtering, Internet, Internet user, Last.fm,
        Library Thing, privacy, privacy risks, recommender systems, Web sites}, 
  doi = {10.1109/SP.2011.40}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Youmightlike2011Calandrino.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2011_2
@inproceedings{2011_2,
  title = {Considering Complex Search Techniques in DHTs under Churn}, 
  author = {Jamie Furness and Mario Kolberg}, 
  booktitle = {CCNC 2011--IEEE Consumer Communications and Networking Conference}, 
  organization = {IEEE Computer Society}, 
  year = {2011}, 
  month = jan, 
  address = {Las Vegas, NV, USA}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Traditionally complex queries have been performed over unstructured P2P
        networks by means of flooding, which is inherently inefficient due to the large
        number of redundant messages generated. While Distributed Hash Tables (DHTs) can
        provide very efficient look-up operations, they traditionally do not provide any
        methods for complex queries. By exploiting the structure inherent in DHTs we can
        perform complex querying over structured P2P networks by means of efficiently
        broadcasting the search query. This allows every node in the network to process
        the query locally, and hence is as powerful and flexible as flooding in
        unstructured networks, but without the inefficiency of redundant messages. While
        there have been various approaches proposed for broadcasting search queries over
        DHTs, the focus has not been on validation under churn. Comparing blind search
        methods for DHTs though simulation we see that churn, in particular nodes leaving
        the network, has a large impact on query success rate. In this paper we present
        novel results comparing blind search over Chord and Pastry while under varying
        levels of churn. We further consider how different data replication strategies
        can be used to enhance the query success rate}, 
  www_section = {churn, complex queries, distributed hash table, search techniques}, 
  url = {https://bibliography.gnunet.org}, 
  isbn = {978-1-4244-8789-9}, 
  doi = {10.1109/CCNC.2011.5766542}, 
}
2011_3
@article{2011_3,
  title = {Distributed Private Data Analysis: On Simultaneously Solving How and What}, 
  author = {Amos Beimel and Kobbi Nissim and Eran Omri}, 
  journal = {CoRR}, 
  volume = {abs/1103.2626}, 
  year = {2011}, 
  abstract = {We examine the combination of two directions in the field of privacy
        concerning computations over distributed private inputs--secure function
        evaluation (SFE) and differential privacy. While in both the goal is to privately
        evaluate some function of the individual inputs, the privacy requirements are
        significantly different. The general feasibility results for SFE suggest a
        natural paradigm for implementing differentially private analyses distributively:
        First choose what to compute, i.e., a differentially private analysis; Then
        decide how to compute it, i.e., construct an SFE protocol for this analysis. We
        initiate an examination whether there are advantages to a paradigm where both
        decisions are made simultaneously. In particular, we investigate under which
        accuracy requirements it is beneficial to adapt this paradigm for computing a
        collection of functions including binary sum, gap threshold, and approximate
        median queries. Our results imply that when computing the binary sum of n
        distributed inputs then: * When we require that the error is {$o(\sqrt{n})$} and the
        number of rounds is constant, there is no benefit in the new paradigm. * When we
        allow an error of {$O(\sqrt{n})$}, the new paradigm yields more efficient protocols
        when we consider protocols that compute symmetric functions. Our results also
        yield new separations between the local and global models of computations for
        private data analysis}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedPrivateData2008Beimel.pdf},
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
2011_4
@article{2011_4,
  title = {High-speed high-security signatures}, 
  author = {Daniel J. Bernstein and Niels Duif and Tanja Lange and Peter Schwabe and Bo-Yin
        Yang}, 
  journal = {Journal of Cryptographic Engineering}, 
  volume = {2}, 
  year = {2011}, 
  month = sep, 
  pages = {77--89}, 
  www_section = {ECC, Ed25519, EdDSA, GNUnet}, 
  url = {http://ed25519.cr.yp.to/papers.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ed25519-20110926.pdf}, 
}
2011_5
@inproceedings{2011_5,
  title = {How Much Is Enough? Choosing {\epsilon} for Differential Privacy}, 
  author = {Lee, Jaewoo and Clifton, Chris}, 
  booktitle = {Information Security}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7001}, 
  year = {2011}, 
  pages = {325--340}, 
  editor = {Lai, Xuejia and Zhou, Jianying and Li, Hui}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Differential privacy is a recent notion, and while it is nice conceptually it
        has been difficult to apply in practice. The parameters of differential privacy
        have an intuitive theoretical interpretation, but the implications and impacts on
        the risk of disclosure in practice have not yet been studied, and choosing
        appropriate values for them is non-trivial. Although the privacy parameter
        {\epsilon} in differential privacy is used to quantify the privacy risk posed by
        releasing statistics computed on sensitive data, {\epsilon} is not an absolute
        measure of privacy but rather a relative measure. In effect, even for the same
        value of {\epsilon} , the privacy guarantees enforced by differential privacy are
        different based on the domain of attribute in question and the query supported.
        We consider the probability of identifying any particular individual as being in
        the database, and demonstrate the challenge of setting the proper value of
        {\epsilon} given the goal of protecting individuals in the database with some
        fixed probability}, 
  www_section = {Differential Privacy, Privacy Parameter, epsilon}, 
  isbn = {978-3-642-24860-3}, 
  doi = {10.1007/978-3-642-24861-0_22}, 
  url = {http://dx.doi.org/10.1007/978-3-642-24861-0_22}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Choosing-\%CE\%B5-2011Lee.pdf},
}
2011_6
@phdthesis{2011_6,
  title = {Methods for Secure Decentralized Routing in Open Networks}, 
  author = {Nathan S Evans}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  type = {Dissertation (Dr. rer. nat.)}, 
  year = {2011}, 
  month = aug, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--234}, 
  abstract = {The contribution of this thesis is the study and improvement of secure,
        decentralized, robust routing algorithms for open networks including ad-hoc
        networks and peer-to-peer (P2P) overlay networks. The main goals for our secure
        routing algorithm are openness, efficiency, scalability and resilience to various
        types of attacks. Common P2P routing algorithms trade-off decentralization for
        security; for instance by choosing whether or not to require a centralized
        authority to allow peers to join the network. Other algorithms trade scalability
        for security, for example employing random search or flooding to prevent certain
        types of attacks. Our design attempts to meet our security goals in an open
        system, while limiting the performance penalties incurred. The first step we took
        towards designing our routing algorithm was an analysis of the routing algorithm
        in Freenet. This algorithm is relevant because it achieves efficient (order O(log
        n)) routing in realistic network topologies in a fully decentralized open
        network. However, we demonstrate why their algorithm is not secure, as malicious
        participants are able to severely disrupt the operation of the network. The main
        difficulty with the Freenet routing algorithm is that for performance it relies
        on information received from untrusted peers. We also detail a range of proposed
        solutions, none of which we found to fully fix the problem. A related problem for
        efficient routing in sparsely connected networks is the difficulty in
        sufficiently populating routing tables. One way to improve connectivity in P2P
        overlay networks is by utilizing modern NAT traversal techniques. We employ a
        number of standard NAT traversal techniques in our approach, and also developed
        and experimented with a novel method for NAT traversal based on ICMP and UDP hole
        punching. Unlike other NAT traversal techniques ours does not require a trusted
        third party. Another technique we use in our implementation to help address the
        connectivity problem in sparse networks is the use of distance vector routing in
        a small local neighborhood. The distance vector variant used in our system
        employs onion routing to secure the resulting indirect connections. Materially to
        this design, we discovered a serious vulnerability in the Tor protocol which
        allowed us to use a DoS attack to reduce the anonymity of the users of this
        extant anonymizing P2P network. This vulnerability is based on allowing paths of
        unrestricted length for onion routes through the network. Analyzing Tor and
        implementing this attack gave us valuable knowledge which helped when designing
        the distance vector routing protocol for our system. Finally, we present the
        design of our new secure randomized routing algorithm that does not suffer from
        the various problems we discovered in previous designs. Goals for the algorithm
        include providing efficiency and robustness in the presence of malicious
        participants for an open, fully decentralized network without trusted
        authorities. We provide a mathematical analysis of the algorithm itself and have
        created and deployed an implementation of this algorithm in GNUnet. In this
        thesis we also provide a detailed overview of a distributed emulation framework
        capable of running a large number of nodes using our full code base as well as
        some of the challenges encountered in creating and using such a testing
        framework. We present extensive experimental results showing that our routing
        algorithm outperforms the dominant DHT design in target topologies, and performs
        comparably in other scenarios}, 
  www_section = {distributed hash table, Freenet, GNUnet, NAT, R5N, Tor}, 
  isbn = {3-937201-26-2}, 
  issn = {1868-2642}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NET-2011-08-1.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2011_7
@article{2011_7,
  author = {Das, Kamalika and Bhaduri, Kanishka and Kargupta, Hillol}, 
  title = {Multi-objective optimization based privacy preserving distributed data mining in
        Peer-to-Peer networks}, 
  journal = {Peer-to-Peer Networking and Applications}, 
  year = {2011}, 
  volume = {4}, 
  pages = {192--209}, 
  issn = {1936-6442}, 
  doi = {10.1007/s12083-010-0075-1}, 
  url = {http://dx.doi.org/10.1007/s12083-010-0075-1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Privacy_PPNA2011Das.pdf},
  www_section = {Data mining, peer-to-peer, Privacy preserving}, 
  abstract = {This paper proposes a scalable, local privacy-preserving algorithm for
        distributed Peer-to-Peer (P2P) data aggregation useful for many advanced data
        mining/analysis tasks such as average/sum computation, decision tree induction,
        feature selection, and more. Unlike most multi-party privacy-preserving data
        mining algorithms, this approach works in an asynchronous manner through local
        interactions and it is highly scalable. It particularly deals with the
        distributed computation of the sum of a set of numbers stored at different peers
        in a P2P network in the context of a P2P web mining application. The proposed
        optimization-based privacy-preserving technique for computing the sum allows
        different peers to specify different privacy requirements without having to
        adhere to a global set of parameters for the chosen privacy model. Since
        distributed sum computation is a frequently used primitive, the proposed approach
        is likely to have significant impact on many data mining tasks such as
        multi-party privacy-preserving clustering, frequent itemset mining, and
        statistical aggregate computation}, 
}
2011_8
@mastersthesis{2011_8,
  title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A
        Real-World Case Study using I2P}, 
  author = {Michael Herrmann}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2011}, 
  month = mar, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--59}, 
  type = {Master's thesis}, 
  abstract = {The Invisible Internet Project (I2P) is one of the most widely used
        anonymizing Peer-to-Peer networks on the Internet today. Like Tor, it uses onion
        routing to build tunnels between peers as the basis for providing anonymous
        communication channels. Unlike Tor, I2P integrates a range of anonymously hosted
        services directly with the platform. This thesis presents a new attack on the I2P
        Peer-to-Peer network, with the goal of determining the identity of peers that are
        anonymously hosting HTTP (Eepsite) services in the network. Key design choices
        made by I2P developers, in particular performance-based peer selection, enable a
        sophisticated adversary with modest resources to break key security assumptions.
        Our attack first obtains an estimate of the victim's view of the network. Then,
        the adversary selectively targets a small number of peers used by the victim with
        a denial-of-service attack while giving the victim the opportunity to replace
        those peers with other peers that are controlled by the adversary. Finally, the
        adversary performs some simple measurements to determine the identity of the peer
        hosting the service. This thesis provides the necessary background on I2P, gives
        details on the attack --- including experimental data from measurements against
        the actual I2P network --- and discusses possible solutions}, 
  www_section = {anonymity, attack, denial-of-service, I2P}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herrmann2011mt.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2011_9
@incollection{2011_9,
  title = {Private Similarity Computation in Distributed Systems: From Cryptography to
        Differential Privacy}, 
  author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie}, 
  booktitle = {Principles of Distributed Systems}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7109}, 
  year = {2011}, 
  pages = {357--377}, 
  editor = {Fern{\'a}ndez Anta, Antonio and Lipari, Giuseppe and Roy, Matthieu}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In this paper, we address the problem of computing the similarity between two
        users (according to their profiles) while preserving their privacy in a fully
        decentralized system and for the passive adversary model. First, we introduce a
        two-party protocol for privately computing a threshold version of the similarity
        and apply it to well-known similarity measures such as the scalar product and the
        cosine similarity. The output of this protocol is only one bit of information
        telling whether or not two users are similar beyond a predetermined threshold.
        Afterwards, we explore the computation of the exact and threshold similarity
        within the context of differential privacy. Differential privacy is a recent
        notion developed within the field of private data analysis guaranteeing that an
        adversary that observes the output of the differentially private mechanism, will
        only gain a negligible advantage (up to a privacy parameter) from the presence
        (or absence) of a particular item in the profile of a user. This provides a
        strong privacy guarantee that holds independently of the auxiliary knowledge that
        the adversary might have. More specifically, we design several differentially
        private variants of the exact and threshold protocols that rely on the addition
        of random noise tailored to the sensitivity of the considered similarity measure.
        We also analyze their complexity as well as their impact on the utility of the
        resulting similarity measure. Finally, we provide experimental results validating
        the effectiveness of the proposed approach on real datasets}, 
  www_section = {Differential Privacy, homomorphic encryption, privacy, similarity
        measure}, 
  isbn = {978-3-642-25872-5}, 
  doi = {10.1007/978-3-642-25873-2_25}, 
  url = {http://dx.doi.org/10.1007/978-3-642-25873-2_25}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PrivateSimilarity2011Alaggan.pdf},
}
2012_0
@incollection{2012_0,
  title = {BLIP: Non-interactive Differentially-Private Similarity Computation on Bloom
        filters}, 
  author = {Alaggan, Mohammad and Gambs, S{\'e}bastien and Kermarrec, Anne-Marie}, 
  booktitle = {Stabilization, Safety, and Security of Distributed Systems}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7596}, 
  year = {2012}, 
  pages = {202--216}, 
  editor = {Richa, Andr{\'e}a W. and Scheideler, Christian}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In this paper, we consider the scenario in which the profile of a user is
        represented in a compact way, as a Bloom filter, and the main objective is to
        privately compute in a distributed manner the similarity between users by relying
        only on the Bloom filter representation. In particular, we aim at providing a
        high level of privacy with respect to the profile even if a potentially unbounded
        number of similarity computations take place, thus calling for a non-interactive
        mechanism. To achieve this, we propose a novel non-interactive differentially
        private mechanism called BLIP (for BLoom-and-flIP) for randomizing Bloom filters.
        This approach relies on a bit flipping mechanism and offers high privacy
        guarantees while maintaining a small communication cost. Another advantage of
        this non-interactive mechanism is that similarity computation can take place even
        when the user is offline, which is impossible to achieve with interactive
        mechanisms. Another of our contributions is the definition of a probabilistic
        inference attack, called the {\textquotedblleft}Profile Reconstruction
        attack{\textquotedblright}, that can be used to reconstruct the profile of an
        individual from his Bloom filter representation. More specifically, we provide an
        analysis of the protection offered by BLIP against this profile reconstruction
        attack by deriving an upper and lower bound for the required value of the
        differential privacy parameter {$\epsilon$}}, 
  isbn = {978-3-642-33535-8}, 
  doi = {10.1007/978-3-642-33536-5_20}, 
  url = {http://dx.doi.org/10.1007/978-3-642-33536-5_20}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BLIP2012Alaggan.pdf}, 
  www_section = {Unsorted}, 
}
2012_1
@inproceedings{2012_1,
  title = {CRISP: Collusion-resistant Incentive-compatible Routing and Forwarding in
        Opportunistic Networks}, 
  author = {Sadiq, Umair and Kumar, Mohan and Wright, Matthew}, 
  booktitle = {Proceedings of the 15th ACM International Conference on Modeling, Analysis
        and Simulation of Wireless and Mobile Systems}, 
  organization = {ACM}, 
  year = {2012}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  www_section = {black-hole attack, collusion, credit schemes, delay tolerant networks,
        flooding, incentive schemes, mobile peer-to-peer networks, opportunistic
        networks}, 
  isbn = {978-1-4503-1628-6}, 
  doi = {10.1145/2387238.2387253}, 
  url = {http://doi.acm.org/10.1145/2387238.2387253}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crisp-mswim.pdf}, 
}
2012_10
@inproceedings{2012_10,
  title = {NTALG--TCP NAT traversal with application-level gateways}, 
  author = {Wander, M. and Holzapfel, S. and Wacker, A. and Weis, T.}, 
  booktitle = {Consumer Communications and Networking Conference (CCNC), 2012 IEEE}, 
  year = {2012}, 
  abstract = {Consumer computers or home communication devices are usually connected to the
        Internet via a Network Address Translation (NAT) router. This imposes
        restrictions for networking applications that require inbound connections.
        Existing solutions for NAT traversal can remedy the restrictions, but still there
        is a fraction of home users which lack support of it, especially when it comes to
        TCP. We present a framework for traversing NAT routers by exploiting their
        built-in FTP and IRC application-level gateways (ALG) for arbitrary TCP-based
        applications. While this does not work in every scenario, it significantly
        improves the success chance without requiring any user interaction at all. To
        demonstrate the framework, we show a small test setup with laptop computers and
        home NAT routers}, 
  www_section = {FTP-ALG, NAT}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WHW_12-NTALG.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_11
@article{2012_11,
  title = {Octopus: A Secure and Anonymous DHT Lookup}, 
  author = {Wang, Qiyan and Borisov, Nikita}, 
  journal = {CoRR}, 
  volume = {abs/1203.2668}, 
  year = {2012}, 
  www_section = {anonymity, distributed hash table}, 
  url = {http://dblp.uni-trier.de/db/journals/corr/corr1203.html\#abs-1203-2668}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/octopus_dht.pdf}, 
}
2012_12
@article{2012_12,
  title = {Personalization and privacy: a survey of privacy risks and remedies in
        personalization-based systems}, 
  author = {Toch, Eran and Wang, Yang and Cranor, Lorrie Faith}, 
  journal = {User Modeling and User-Adapted Interaction}, 
  volume = {22}, 
  year = {2012}, 
  pages = {203--220}, 
  abstract = {Personalization technologies offer powerful tools for enhancing the user
        experience in a wide variety of systems, but at the same time raise new privacy
        concerns. For example, systems that personalize advertisements according to the
        physical location of the user or according to the user's friends' search history,
        introduce new privacy risks that may discourage wide adoption of personalization
        technologies. This article analyzes the privacy risks associated with several
        current and prominent personalization trends, namely social-based
        personalization, behavioral profiling, and location-based personalization. We
        survey user attitudes towards privacy and personalization, as well as
        technologies that can help reduce privacy risks. We conclude with a discussion
        that frames risks and technical solutions in the intersection between
        personalization and privacy, as well as areas for further investigation. This
        frameworks can help designers and researchers to contextualize privacy challenges
        of solutions when designing personalization systems}, 
  www_section = {e-commerce, Human--computer interaction, Location-based services,
        Personalization, privacy, social networks}, 
  issn = {0924-1868}, 
  doi = {10.1007/s11257-011-9110-z}, 
  url = {http://dx.doi.org/10.1007/s11257-011-9110-z}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Personalization2012Toch.pdf},
}
2012_13
@article{2012_13,
  title = {Saturn: Range Queries, Load Balancing and Fault Tolerance in DHT Data Systems}, 
  author = {Theoni Pitoura and Nikos Ntarmos and Peter Triantafillou}, 
  journal = {IEEE Transactions on Knowledge and Data Engineering}, 
  volume = {24}, 
  year = {2012}, 
  month = {July}, 
  chapter = {1313}, 
  abstract = {In this paper, we present Saturn, an overlay architecture for large-scale
        data networks maintained over Distributed Hash Tables (DHTs) that efficiently
        processes range queries and ensures access load balancing and fault-tolerance.
        Placing consecutive data values in neighboring peers is desirable in DHTs since
        it accelerates range query processing; however, such a placement is highly
        susceptible to load imbalances. At the same time, DHTs may be susceptible to node
        departures/failures and high data availability and fault tolerance are
        significant issues. Saturn deals effectively with these problems through the
        introduction of a novel multiple ring, order-preserving architecture. The use of
        a novel order-preserving hash function ensures fast range query processing.
        Replication across and within data rings (termed vertical and horizontal
        replication) forms the foundation over which our mechanisms are developed,
        ensuring query load balancing and fault tolerance, respectively. Our detailed
        experimentation study shows strong gains in range query processing efficiency,
        access load balancing, and fault tolerance, with low replication overheads. The
        significance of Saturn is not only that it effectively tackles all three issues
        together{\textemdash}i.e., supporting range queries, ensuring load balancing, and
        providing fault tolerance over DHTs{\textemdash}but also that it can be applied
        on top of any order-preserving DHT enabling it to dynamically handle replication
        and, thus, to trade off replication costs for fair load distribution and fault
        tolerance}, 
  www_section = {distributed hash table, load balancing, range queries, Saturn}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saturn-range-dht.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_14
@article{2012_14,
  title = {The state-of-the-art in personalized recommender systems for social networking}, 
  author = {Zhou, Xujuan and Xu, Yue and Li, Yuefeng and J{\o}sang, Audun and Cox, Clive}, 
  journal = {Artificial Intelligence Review}, 
  volume = {37}, 
  year = {2012}, 
  pages = {119--132}, 
  abstract = {With the explosion of Web 2.0 application such as blogs, social and
        professional networks, and various other types of social media, the rich online
        information and various new sources of knowledge flood users and hence pose a
        great challenge in terms of information overload. It is critical to use
        intelligent agent software systems to assist users in finding the right
        information from an abundance of Web data. Recommender systems can help users
        deal with information overload problem efficiently by suggesting items (e.g.,
        information and products) that match users' personal interests. The recommender
        technology has been successfully employed in many applications such as
        recommending films, music, books, etc. The purpose of this report is to give an
        overview of existing technologies for building personalized recommender systems
        in social networking environment, to propose a research direction for addressing
        user profiling and cold start problems by exploiting user-generated content newly
        available in Web 2.0}, 
  www_section = {recommender systems, Social networking, trust, User generated content,
        user profiles}, 
  issn = {0269-2821}, 
  doi = {10.1007/s10462-011-9222-1}, 
  url = {http://dx.doi.org/10.1007/s10462-011-9222-1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PersonalizedRecommender2012Zhou.pdf},
}
2012_15
@article{2012_15,
  title = {A Survey of Monte Carlo Tree Search Methods}, 
  author = {Cameron Browne and Edward Powley and Daniel Whitehouse and Simon Lucas and
        Peter I. Cowling and Philipp Rohlfshagen and Stephen Tavener and Diego Perez and
        Spyridon Samothrakis and Simon Colton}, 
  journal = {IEEE Transactions on Computational Intelligence and AI in Games}, 
  volume = {4}, 
  year = {2012}, 
  month = mar, 
  pages = {1--43}, 
  abstract = {Monte Carlo tree search (MCTS) is a recently proposed search method that
        combines the precision of tree search with the generality of random sampling. It
        has received considerable interest due to its spectacular success in the
        difficult problem of computer Go, but has also proved beneficial in a range of
        other domains. This paper is a survey of the literature to date, intended to
        provide a snapshot of the state of the art after the first five years of MCTS
        research. We outline the core algorithm's derivation, impart some structure on
        the many variations and enhancements that have been proposed, and summarize the
        results from the key game and nongame domains to which MCTS methods have been
        applied. A number of open research questions indicate that the field is ripe for
        future work}, 
  www_section = {AI, artificial intelligence, bandit-based methods, computer go., game
        search, MCTS, monte carlo tree search, UCB, UCT, upper confidence bounds, upper
        confidence bounds for trees}, 
  issn = {1943-068X}, 
  doi = {10.1109/TCIAIG.2012.2186810}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Browne\%20et\%20al\%20-\%20A\%20survey\%20of\%20MCTS\%20methods.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2012_16
@article{2012_16,
  title = {Theory and Practice of Bloom Filters for Distributed Systems}, 
  author = {Tarkoma, S. and Rothenberg, C.E. and Lagerspetz, E.}, 
  journal = {IEEE Communications Surveys \& Tutorials}, 
  volume = {14}, 
  year = {2012}, 
  month = jan, 
  pages = {131--155}, 
  abstract = {Many network solutions and overlay networks utilize probabilistic techniques
        to reduce information processing and networking costs. This survey article
        presents a number of frequently used and useful probabilistic techniques. Bloom
        filters and their variants are of prime importance, and they are heavily used in
        various distributed systems. This has been reflected in recent research and many
        new algorithms have been proposed for distributed systems that are either
        directly or indirectly based on Bloom filters. In this survey, we give an
        overview of the basic and advanced techniques, reviewing over 20 variants and
        discussing their application in distributed systems, in particular for caching,
        peer-to-peer systems, routing and forwarding, and measurement data
        summarization}, 
  www_section = {Unsorted}, 
  issn = {1553-877X}, 
  doi = {10.1109/SURV.2011.031611.00024}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TheoryandPracticeBloomFilter2011Tarkoma.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2012_17
@inproceedings{2012_17,
  title = {User Interests Driven Web Personalization Based on Multiple Social Networks}, 
  author = {Zeng, Yi and Zhong, Ning and Ren, Xu and Wang, Yan}, 
  booktitle = {Proceedings of the 4th International Workshop on Web Intelligence
        \& Communities}, 
  organization = {ACM}, 
  year = {2012}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {User related data indicate user interests in a certain environment. In the
        context of massive data from the Web, if an application wants to provide more
        personalized service (e.g. search) for users, an investigation on user interests
        is needed. User interests are usually distributed in different sources. In order
        to provide a more comprehensive understanding, user related data from multiple
        sources need to be integrated together for deeper analysis. Web based social
        networks have become typical platforms for extracting user interests. In
        addition, there are various types of interests from these social networks. In
        this paper, we provide an algorithmic framework for retrieving semantic data
        based on user interests from multiple sources (such as multiple social networking
        sites). We design several algorithms to deal with interests based retrieval based
        on single and multiple types of interests. We utilize publication data from
        Semantic Web Dog Food (which can be considered as an academic collaboration based
        social network), and microblogging data from Twitter to validate our framework.
        The Active Academic Visit Recommendation Application (AAVRA) is developed as a
        concrete usecase to show the potential effectiveness of the proposed framework
        for user interests driven Web personalization based on multiple social networks}, 
  www_section = {interest analysis, search refinement, web personalization}, 
  isbn = {978-1-4503-1189-2}, 
  doi = {10.1145/2189736.2189749}, 
  url = {http://doi.acm.org/10.1145/2189736.2189749}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WebPersonalization2012Zeng.pdf},
}
2012_2
@mastersthesis{2012_2,
  title = {Decentralized Evaluation of Regular Expressions for Capability Discovery in
        Peer-to-Peer Networks}, 
  author = {Maximilian Szengel}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2012}, 
  month = nov, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--100}, 
  type = {Master's thesis}, 
  abstract = {This thesis presents a novel approach for decentralized evaluation of regular
        expressions for capability discovery in DHT-based overlays. The system provides
        support for announcing capabilities expressed as regular expressions and
        discovering participants offering adequate capabilities. The idea behind our
        approach is to convert regular expressions into finite automatons and store the
        corresponding states and transitions in a DHT. We show how locally constructed
        DFA are merged in the DHT into an NFA without the knowledge of any NFA already
        present in the DHT and without the need for any central authority. Furthermore we
        present options of optimizing the DFA. There exist several possible applications
        for this general approach of decentralized regular expression evaluation.
        However, in this thesis we focus on the application of discovering users that are
        willing to provide network access using a specified protocol to a particular
        destination. We have implemented the system for our proposed approach and
        conducted a simulation. Moreover we present the results of an emulation of the
        implemented system in a cluster}, 
  www_section = {DFA, distributed hash table, GNUnet, NFA, regular expressions, search}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/szengel2012ms.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_3
@mastersthesis{2012_3,
  title = {Design and Implementation of a Censorship Resistant and Fully Decentralized Name
        System}, 
  author = {Martin Schanzenbach}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2012}, 
  month = sep, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--116}, 
  type = {Master's thesis}, 
  abstract = {This thesis presents the design and implementation of the GNU Alternative
        Domain System (GADS), a decentralized, secure name system providing memorable
        names for the Internet as an alternative to the Domain Name System (DNS). The
        system builds on ideas from Rivest's Simple Distributed Security Infrastructure
        (SDSI) to address a central issue with providing a decentralized mapping of
        secure identifiers to memorable names: providing a global, secure and memorable
        mapping is impossible without a trusted authority. SDSI offers an alternative by
        linking local name spaces; GADS uses the transitivity provided by the SDSI design
        to build a decentralized and censorship resistant name system without a trusted
        root based on secure delegation of authority. Additional details need to be
        considered in order to enable GADS to integrate smoothly with the World Wide Web.
        While following links on the Web matches following delegations in GADS, the
        existing HTTP-based infrastructure makes many assumptions about globally unique
        names; however, proxies can be used to enable legacy applications to function
        with GADS. This work presents the fundamental goals and ideas behind GADS,
        provides technical details on how GADS has been implemented and discusses
        deployment issues for using GADS with existing systems. We discuss how GADS and
        legacy DNS can interoperate during a transition period and what additional
        security advantages GADS offers over DNS with Security Extensions (DNSSEC).
        Finally, we present the results of a survey into surfing behavior, which suggests
        that the manual introduction of new direct links in GADS will be infrequent}, 
  www_section = {censorship resistance, decentralized, DNS, GNU Name System, GNUnet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/schanzen2012msc.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_4
@incollection{2012_4,
  title = {Differential Privacy with Imperfect Randomness}, 
  author = {Dodis, Yevgeniy and L{\'o}pez-Alt, Adriana and Mironov, Ilya and Vadhan,
        Salil}, 
  booktitle = {Advances in Cryptology -- CRYPTO 2012}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7417}, 
  year = {2012}, 
  pages = {497--516}, 
  editor = {Safavi-Naini, Reihaneh and Canetti, Ran}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In this work we revisit the question of basing cryptography on imperfect
        randomness. Bosley and Dodis (TCC'07) showed that if a source of randomness R is
        {\textquotedblleft}good enough{\textquotedblright} to generate a secret key
        capable of encrypting k bits, then one can deterministically extract nearly k
        almost uniform bits from R, suggesting that traditional privacy notions (namely,
        indistinguishability of encryption) requires an
        {\textquotedblleft}extractable{\textquotedblright} source of randomness. Other,
        even stronger impossibility results are known for achieving privacy under
        specific {\textquotedblleft}non-extractable{\textquotedblright} sources of
        randomness, such as the {\gamma}-Santha-Vazirani (SV) source, where each next bit
        has fresh entropy, but is allowed to have a small bias {\gamma} < 1 (possibly
        depending on prior bits). We ask whether similar negative results also hold for a
        more recent notion of privacy called differential privacy (Dwork et al., TCC'06),
        concentrating, in particular, on achieving differential privacy with the
        Santha-Vazirani source. We show that the answer is no. Specifically, we give a
        differentially private mechanism for approximating arbitrary
        {\textquotedblleft}low sensitivity{\textquotedblright} functions that works even
        with randomness coming from a {\gamma}-Santha-Vazirani source, for any {\gamma} <
        1. This provides a somewhat surprising
        {\textquotedblleft}separation{\textquotedblright} between traditional privacy and
        differential privacy with respect to imperfect randomness. Interestingly, the
        design of our mechanism is quite different from the traditional
        {\textquotedblleft}additive-noise{\textquotedblright} mechanisms (e.g., Laplace
        mechanism) successfully utilized to achieve differential privacy with perfect
        randomness. Indeed, we show that any (non-trivial)
        {\textquotedblleft}SV-robust{\textquotedblright} mechanism for our problem
        requires a demanding property called consistent sampling, which is strictly
        stronger than differential privacy, and cannot be satisfied by any additive-noise
        mechanism}, 
  isbn = {978-3-642-32008-8}, 
  doi = {10.1007/978-3-642-32009-5_29}, 
  url = {http://dx.doi.org/10.1007/978-3-642-32009-5_29}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DPwithImperfectRandomness2012Dodis.pdf},
  www_section = {Unsorted}, 
}
2012_5
@techreport{2012_5,
  title = {Efficient and Secure Decentralized Network Size Estimation}, 
  author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}, 
  institution = {Technische Universit{\"a}t M{\"u}nchen}, 
  year = {2012}, 
  month = {May}, 
  address = {Garching bei M{\"u}nchen}, 
  abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for
        performance tuning of P2P routing algorithms. This paper introduces and evaluates
        a new efficient method for participants in an unstructured P2P network to
        establish the size of the overall network. The presented method is highly
        efficient, propagating information about the current size of the network to all
        participants using O(|E|) operations where |E| is the number of edges in the
        network. Afterwards, all nodes have the same network size estimate, which can be
        made arbitrarily accurate by averaging results from multiple rounds of the
        protocol. Security measures are included which make it prohibitively expensive
        for a typical active participating adversary to significantly manipulate the
        estimates. This paper includes experimental results that demonstrate the
        viability, efficiency and accuracy of the protocol}, 
  www_section = {GNUnet, network security, network size estimation, peer-to-peer
        networking}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nse-techreport.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_6
@inproceedings{2012_6,
  title = {Efficient and Secure Decentralized Network Size Estimation}, 
  author = {Nathan S Evans and Polot, Bartlomiej and Christian Grothoff}, 
  booktitle = {IFIP International Conferences on Networking (Networking 2012)}, 
  organization = {Springer Verlag}, 
  year = {2012}, 
  month = {May}, 
  address = {Prague, CZ}, 
  pages = {304--317}, 
  publisher = {Springer Verlag}, 
  abstract = {The size of a Peer-to-Peer (P2P) network is an important parameter for
        performance tuning of P2P routing algorithms. This paper introduces and evaluates
        a new efficient method for participants in an unstructured P2P network to
        establish the size of the overall network. The presented method is highly
        efficient, propagating information about the current size of the network to all
        participants using O(|E|) operations where |E| is the number of edges in the
        network. Afterwards, all nodes have the same network size estimate, which can be
        made arbitrarily accurate by averaging results from multiple rounds of the
        protocol. Security measures are included which make it prohibitively expensive
        for a typical active participating adversary to significantly manipulate the
        estimates. This paper includes experimental results that demonstrate the
        viability, efficiency and accuracy of the protocol}, 
  www_section = {byzantine fault tolerance, GNUnet, network size estimation, proof of
        work}, 
  url = {http://grothoff.org/christian/rrsize2012.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-ifip.pdf}, 
}
2012_7
@inproceedings{2012_7,
  title = {Koi: A Location-Privacy Platform for Smartphone Apps}, 
  author = {Saikat Guha and Mudit Jain and Venkata Padmanabhan}, 
  booktitle = {Proceedings of the 9th Symposium on Networked Systems Design and
        Implementation (NSDI)}, 
  year = {2012}, 
  month = {April}, 
  address = {San Jose, CA}, 
  abstract = {With mobile phones becoming first-class citizens in the online world, the
        rich location data they bring to the table is set to revolutionize all aspects of
        online life including content delivery, recommendation systems, and advertising.
        However, user-tracking is a concern with such location-based services, not only
        because location data can be linked uniquely to individuals, but because the
        low-level nature of current location APIs and the resulting dependence on the
        cloud to synthesize useful representations virtually guarantees such tracking. In
        this paper, we propose privacy-preserving location-based matching as a
        fundamental platform primitive and as an alternative to exposing low-level,
        latitude-longitude (lat-long) coordinates to applications. Applications set rich
        location-based triggers and have these be fired based on location updates either
        from the local device or from a remote device (e.g., a friend's phone). Our Koi
        platform, comprising a privacy-preserving matching service in the cloud and a
        phone-based agent, realizes this primitive across multiple phone and browser
        platforms. By masking low-level lat-long information from applications, Koi not
        only avoids leaking privacy-sensitive information, it also eases the task of
        programmers by providing a higher-level abstraction that is easier for
        applications to build upon. Koi's privacy-preserving protocol prevents the cloud
        service from tracking users. We verify the non-tracking properties of Koi using a
        theorem prover, illustrate how privacy guarantees can easily be added to a wide
        range of location-based applications, and show that our public deployment is
        performant, being able to perform 12K matches per second on a single core}, 
  www_section = {location privacy, matching}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nsdi12-koi.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2012_8
@incollection{2012_8,
  title = {Lower Bounds in Differential Privacy}, 
  author = {De, Anindya}, 
  booktitle = {Theory of Cryptography}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7194}, 
  year = {2012}, 
  pages = {321--338}, 
  editor = {Cramer, Ronald}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This paper is about private data analysis, in which a trusted curator holding
        a confidential database responds to real vector-valued queries. A common approach
        to ensuring privacy for the database elements is to add appropriately generated
        random noise to the answers, releasing only these noisy responses. A line of
        study initiated in [7] examines the amount of distortion needed to prevent
        privacy violations of various kinds. The results in the literature vary according
        to several parameters, including the size of the database, the size of the
        universe from which data elements are drawn, the
        {\textquotedblleft}amount{\textquotedblright} of privacy desired, and for the
        purposes of the current work, the arity of the query. In this paper we sharpen
        and unify these bounds. Our foremost result combines the techniques of Hardt and
        Talwar [11] and McGregor et al. [13] to obtain linear lower bounds on distortion
        when providing differential privacy for a (contrived) class of low-sensitivity
        queries. (A query has low sensitivity if the data of a single individual has
        small effect on the answer.) Several structural results follow as immediate
        corollaries: We separate so-called counting queries from arbitrary
        low-sensitivity queries, proving the latter requires more noise, or distortion,
        than does the former; We separate ({\epsilon},0)-differential privacy from its
        well-studied relaxation ({\epsilon},{\delta})-differential privacy, even when
        {\delta} {\epsilon} 2- o(n) is negligible in the size n of the database, proving
        the latter requires less distortion than the former; We demonstrate that
        ({\epsilon},{\delta})-differential privacy is much weaker than
        ({\epsilon},0)-differential privacy in terms of mutual information of the
        transcript of the mechanism with the database, even when {\delta} {\epsilon} 2-
        o(n) is negligible in the size n of the database. We also simplify the lower
        bounds on noise for counting queries in [11] and also make them unconditional.
        Further, we use a characterization of ({\epsilon},{\delta}) differential privacy
        from [13] to obtain lower bounds on the distortion needed to ensure
        ({\epsilon},{\delta})-differential privacy for {\epsilon},{\delta} > 0. We next
        revisit the LP decoding argument of [10] and combine it with a recent result of
        Rudelson [15] to improve on a result of Kasiviswanathan et al. [12] on noise
        lower bounds for privately releasing l-way marginals}, 
  www_section = {Differential Privacy, LP decoding}, 
  isbn = {978-3-642-28913-2}, 
  doi = {10.1007/978-3-642-28914-9_18}, 
  url = {http://dx.doi.org/10.1007/978-3-642-28914-9_18}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LowerBoundsDP2012De.pdf},
}
2012_9
@mastersthesis{2012_9,
  author = {Safey A. Halim}, 
  title = {Monkey: Automated debugging of deployed distributed systems}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  type = {Masters}, 
  volume = {M.S}, 
  year = {2012}, 
  month = {July}, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--78}, 
  abstract = {Debugging is tedious and time consuming work that, for certain types of bugs,
        can and should be automated. Debugging distributed systems is more complex due to
        time dependencies between interacting processes. Another related problem is
        duplicate bug reports in bug repositories. Finding bug duplicates is hard and
        wastes developers' time which may affect the development team's rate of bug fixes
        and new releases. In this master thesis we introduce Monkey, a new tool that
        provides a solution for automated classification, investigation and
        characterization of bugs, as well as a solution for comparing bug reports and
        avoiding duplicates. Our tool is particularly suitable for distributed systems
        due to its autonomy. We present Monkey's key design goals and architecture and
        give experimental results demonstrating the viability of our approach}, 
  www_section = {automation, debugging, distributed systems}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/safey-thesis-monkey.pdf
        , https://git.gnunet.org/bibliography.git/plain/docs/safey-presentation-monkey.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2013_1
@mastersthesis{2013_1,
  title = {Large Scale Distributed Evaluation of Peer-to-Peer Protocols}, 
  author = {Totakura, Sree Harsha}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {Master of Science}, 
  year = {2013}, 
  month = {June}, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--76}, 
  type = {Masters}, 
  abstract = {Evaluations of P2P protocols during the system's design and implementation
        phases are commonly done through simulation and emulation respectively. While the
        current state-of-the-art simulation allows evaluations with many millions of
        peers through the use of abstractions, emulation still lags behind as it involves
        executing the real implementation at some parts of the system. This difference in
        scales can make it hard to relate the evaluations created with simulation
        and emulation during the design and implementation phases and can result in a
        limited evaluation of the implementation, which may cause severe problems after
        deployment. In this thesis, we build upon an existing emulator for P2P
        applications to push the scales offered by emulation towards the limits set by
        simulation. Our approach distributes and co-ordinates the emulation across many
        hosts. Large deployments are possible by deploying hundreds or thousands of peers
        on each host. To address the varying needs of an experimenter and the range of
        available hardware, we make our approach scalable such that it can easily be
        adapted to run evaluations on a single machine or a large group of hosts.
        Specifically, the system automatically adjusts the number of overlapping
        operations to the available resources efficiently using a feedback mechanism,
        thus relieving the experimenter from the hassles of manual tuning. We
        specifically target HPC systems like compute clusters and supercomputers and
        demonstrate how such systems can be used for large scale emulations by evaluating
        two P2P applications with deployment sizes up to 90k peers on a supercomputer}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thesis_lowres.pdf ,
        https://git.gnunet.org/bibliography.git/plain/docs/thesis.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2013_2
@mastersthesis{2013_2,
  title = {Monkey--Generating Useful Bug Reports Automatically}, 
  author = {Markus Teich}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {Bachelor}, 
  year = {2013}, 
  month = {July}, 
  address = {Munich}, 
  pages = {0--50}, 
  type = {Bachelor Thesis}, 
  abstract = {Automatic crash handlers support software developers in finding bugs and
        fixing the problems in their code. Most of them behave similarly in providing the
        developer with a (symbolic) stack trace and a memory dump of the crashed
        application. This introduces some problems that we try to fix with our proposed
        automatic bug reporting system called {\textquotedblleft}Monkey{\textquotedblright}. In this paper we describe the
        problems that occur when debugging widely distributed systems and how Monkey
        handles them. First, we describe our Motivation for developing the Monkey
        system. Afterwards we present the most common existing automatic crash handlers
        and how they work. Thirdly you will get an overview of the Monkey system and its
        components. In the fourth chapter we will analyze one report generated by
        Monkey, evaluate an online experiment we conducted and present some of our
        findings during the development of the clustering algorithm used to categorize
        crash reports. Last, we discuss some of Monkey's features and compare them to the
        existing approaches. Also some ideas for the future development of the Monkey
        system are presented before we conclude that Monkey's approach is promising, but
        some work is still left to establish Monkey in the open source community}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/main_0.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2013_3
@inproceedings{2013_3,
  title = {Persea: A Sybil-resistant Social DHT}, 
  author = {Al-Ameen, Mahdi N. and Matthew Wright}, 
  booktitle = {Proceedings of the Third ACM Conference on Data and Application Security and
        Privacy}, 
  organization = {ACM}, 
  year = {2013}, 
  address = {New York, NY, USA}, 
  publisher = {ACM}, 
  abstract = {P2P systems are inherently vulnerable to Sybil attacks, in which an attacker
        can have a large number of identities and use them to control a substantial
        fraction of the system. We propose Persea, a novel P2P system that is more robust
        against Sybil attacks than prior approaches. Persea derives its Sybil resistance
        by assigning IDs through a bootstrap tree, the graph of how nodes have joined the
        system through invitations. More specifically, a node joins Persea when it gets
        an invitation from an existing node in the system. The inviting node assigns a
        node ID to the joining node and gives it a chunk of node IDs for further
        distribution. For each chunk of ID space, the attacker needs to socially engineer
        a connection to another node already in the system. This hierarchical
        distribution of node IDs confines a large attacker botnet to a considerably
        smaller region of the ID space than in a normal P2P system. Persea uses a
        replication mechanism in which each (key,value) pair is stored in nodes that are
        evenly spaced over the network. Thus, even if a given region is occupied by
        attackers, the desired (key,value) pair can be retrieved from other regions. We
        compare our results with Kad, Whanau, and X-Vine and show that Persea is a better
        solution against Sybil attacks}, 
  www_section = {security, social dht, Sybil attack}, 
  isbn = {978-1-4503-1890-7}, 
  doi = {10.1145/2435349.2435372}, 
  url = {http://doi.acm.org/10.1145/2435349.2435372}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p169-al-ameen.pdf}, 
}
2013_4
@misc{2013_4,
  title = {Public Key Pinning for TLS Using a Trust on First Use Model}, 
  author = {Gabor X Toth}, 
  year = {2013}, 
  editor = {Tjebbe Vlieg}, 
  abstract = {Although the Public Key Infrastructure (PKI) using X.509 is meant to prevent
        the occurrence of man-in-the-middle attacks on TLS, there are still situations in
        which such attacks are possible due to the large number of Certification
        Authorities (CA) that has to be trusted. Recent incidents involving CA
        compromises, which lead to issuance of rogue certificates indicate the weakness
        of the PKI model. Recently various public key pinning protocols -- such as DANE
        or TACK -- have been proposed to thwart man-in-the-middle attacks on TLS
        connections. It will take a longer time, however, until any of these protocols
        reach wide deployment. We present an approach intended as an interim solution to
        bridge this gap and provide protection for connections to servers not yet using a
        pinning protocol. The presented method is based on public key pinning with a
        trust on first use model, and can be combined with existing notary approaches as
        well}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tofu-pinning.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2013_5
@mastersthesis{2013_5,
  author = {Andrey Uzunov}, 
  title = {Speeding Up Tor with SPDY}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  type = {Master's}, 
  volume = {Master's in Computer Science}, 
  year = {2013}, 
  month = {November}, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--124}, 
  abstract = {SPDY is a rather new protocol which is an alternative to HTTP. It was
        designed to address inefficiencies in the latter and thereby improve latency and
        reduce bandwidth consumption. This thesis presents the design and implementation
        of a setup for utilizing SPDY within the anonymizing Tor network for reducing
        latency and traffic in the latter. A C library implementing the SPDY server
        protocol is introduced together with an HTTP to SPDY and a SPDY to HTTP proxy
        which are the base for the presented design. Furthermore, we focus on the SPDY
        server push feature which allows servers to send multiple responses to a single
        request for reducing latency and traffic on loading web pages. We propose a
        prediction algorithm for employing push at SPDY servers and proxies. The
        algorithm makes predictions based on previous requests and responses and
        initially does not know anything about the data which it will push. This thesis
        includes extensive measurement data highlighting the possible benefits of using
        SPDY instead of HTTP and HTTPS (1.0 or 1.1), especially with respect to networks
        experiencing latency or loss. Moreover, the real profit from using SPDY within
        the Tor network on loading some of the most popular web sites is presented.
        Finally, evaluations of the proposed push prediction algorithm are given for
        emphasizing the possible gain of employing it at SPDY reverse and forward
        proxies}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/uzunov2013torspdy.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
2013_6
@inproceedings{2013_6,
  title = {Trawling for Tor Hidden Services: Detection, Measurement, Deanonymization}, 
  author = {Biryukov, A. and Pustogarov, I. and Weinmann, R.}, 
  booktitle = {Security and Privacy (SP), 2013 IEEE Symposium on}, 
  year = {2013}, 
  www_section = {Unsorted}, 
  doi = {10.1109/SP.2013.15}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trawling_for_tor_HS.pdf},
  url = {https://bibliography.gnunet.org}, 
}
2013_7
@inproceedings{2013_7,
  title = {WhatsUp: A Decentralized Instant News Recommender}, 
  author = {Antoine Boutet and Davide Frey and Rachid Guerraoui and Arnaud Jegou and
        Anne-Marie Kermarrec}, 
  booktitle = {IEEE 27th International Symposium on Parallel \& Distributed Processing}, 
  organization = {IEEE}, 
  year = {2013}, 
  publisher = {IEEE}, 
  abstract = {We present WHATSUP, a collaborative filtering system for disseminating news
        items in a large-scale dynamic setting with no central authority. WHATSUP
        constructs an implicit social network based on user profiles that express the
        opinions of users about the news items they receive (like-dislike). Users with
        similar tastes are clustered using a similarity metric reflecting long-standing
        and emerging (dis)interests. News items are disseminated through a novel
        heterogeneous gossip protocol that (1) biases the orientation of its targets
        towards those with similar interests, and (2) amplifies dissemination based on
        the level of interest in every news item. We report on an extensive evaluation of
        WHATSUP through (a) simulations, (b) a ModelNet emulation on a cluster, and (c) a
        PlanetLab deployment based on real datasets. We show that WHATSUP outperforms
        various alternatives in terms of accurate and complete delivery of relevant news
        items while preserving the fundamental advantages of standard gossip: namely,
        simplicity of deployment and robustness}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/whatsup.pdf}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
2016
@inproceedings{2016,
  title = {Managing and Presenting User Attributes over a Decentralized Secure Name
        System}, 
  author = {Martin Schanzenbach and Christian Banse}, 
  booktitle = {Data Privacy Management and Security Assurance--11th International Workshop,
        {DPM} 2016 and 5th International Workshop, {QASA} 2016, Heraklion, Crete, Greece,
        September 26-27, 2016, Proceedings}, 
  organization = {Springer}, 
  year = {2016}, 
  month = {September}, 
  address = {Crete, Greece}, 
  publisher = {Springer}, 
  abstract = {Today, user attributes are managed at centralized identity providers.
        However, two centralized identity providers dominate digital identity and access
        management on the web. This is increasingly becoming a privacy problem in times
        of mass surveillance and data mining for targeted advertisement. Existing systems
        for attribute sharing or credential presentation either rely on a trusted third
        party service or require the presentation to be online and synchronous. In this
        paper we propose a concept that allows the user to manage and share his
        attributes asynchronously with a requesting party using a secure, decentralized
        name system}, 
  www_section = {Decentralisation, GNUnet, Identity and Access Management, User
        Attributes}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
2017_0
@mastersthesis{2017_0,
  title = {The GNUnet System}, 
  author = {Grothoff, Christian}, 
  school = {Universit{\'e} de Rennes 1}, 
  volume = {HDR}, 
  year = {2017}, 
  month = {December}, 
  address = {Rennes}, 
  pages = {0--181}, 
  type = {Habilitation {\`a} diriger des recherches}, 
  abstract = {GNUnet is an alternative network stack for building secure, decentralized and
        privacy-preserving distributed applications. Our goal is to replace the old
        insecure Internet protocol stack. Starting from an application for secure
        publication of files, it has grown to include all kinds of basic protocol
        components and applications towards the creation of a GNU internet. This
        habilitation provides an overview of the GNUnet architecture, including the
        development process, the network architecture and the software architecture. The
        goal of Part 1 is to provide an overview of how the various parts of the project
        work together today, and to then give ideas for future directions. The text is a
        first attempt to provide this kind of synthesis, and in return does not go into
        extensive technical depth on any particular topic. Part 2 then gives selected
        technical details based on eight publications covering many of the core
        components. This is a harsh selection; on the GNUnet website there are more than
        50 published research papers and theses related to GNUnet, providing extensive
        and in-depth documentation. Finally, Part 3 gives an overview of current plans
        and future work}, 
  keywords = {decentralization, GNUnet, peer-to-peer, privacy, private information
        retrieval, routing, secure multiparty computation, self-organization}, 
  www_section = {decentralization, GNUnet, peer-to-peer, privacy, private information
        retrieval, routing, secure multiparty computation, self-organization}, 
  www_tags = {selected}, 
  note = {Also available from HAL: https://hal.inria.fr/tel-01654244}, 
  url = {https://grothoff.org/christian/habil.pdf}, 
}
2018_0
@article{2018_0,
  title = {Toward secure name resolution on the internet}, 
  author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}, 
  journal = {Computers \& Security}, 
  year = {2018}, 
  abstract = {The Domain Name System (DNS) provides crucial name resolution functions for
        most Internet services. As a result, DNS traffic provides an important attack
        vector for mass surveillance, as demonstrated by the QUANTUMDNS and MORECOWBELL
        programs of the NSA. This article reviews how DNS works and describes security
        considerations for next generation name resolution systems. We then describe DNS
        variations and analyze their impact on security and privacy. We also consider
        Namecoin, the GNU Name System and RAINS, which are more radical re-designs of
        name systems in that they both radically change the wire protocol and also
        eliminate the existing global consensus on TLDs provided by ICANN. Finally, we
        assess how the different systems stack up with respect to the goal of improving
        security and privacy of name resolution for the future Internet}, 
  keywords = {Future Internet, GNUnet, Name resolution, network architecture, privacy,
        Technology and society}, 
  www_section = {Future Internet, GNUnet, Name resolution, network architecture, privacy,
        Technology and society}, 
  issn = {0167-4048}, 
  doi = {10.1016/j.cose.2018.01.018}, 
  url = {http://www.sciencedirect.com/science/article/pii/S0167404818300403}, 
  www_tags = {selected}, 
}
2018_1
@inproceedings{2018_1,
  author = {Schanzenbach, M. and Bramm, G. and Sch{\"u}tte, J.}, 
  title = {reclaimID: Secure, Self-Sovereign Identities using Name Systems and
        Attribute-Based Encryption}, 
  booktitle = {Proceedings of 17th IEEE International Conference On Trust, Security And
        Privacy In Computing And Communications/ 12th IEEE International Conference On
        Big Data Science And Engineering (TrustCom/BigDataSE)}, 
  year = {2018}, 
  abstract = {In this paper we present reclaimID: An architecture that allows users to
        reclaim their digital identities by securely sharing identity attributes without
        the need for a centralised service provider. We propose a design where user
        attributes are stored in and shared over a name system under user-owned
        namespaces. Attributes are encrypted using attribute-based encryption (ABE),
        allowing the user to selectively authorize and revoke access of requesting
        parties to subsets of his attributes. We present an implementation based on the
        decentralised GNU Name System (GNS) in combination with ciphertext-policy ABE
        using type-1 pairings. To show the practicality of our implementation, we carried
        out experimental evaluations of selected implementation aspects including
        attribute resolution performance. Finally, we show that our design can be used as
        a standard OpenID Connect Identity Provider allowing our implementation to be
        integrated into standard-compliant services}, 
  keywords = {Computer Science - Cryptography and Security}, 
  www_section = {Computer Science - Cryptography and Security}, 
  www_tags = {selected}, 
  url = {https://arxiv.org/abs/1805.06253v1}, 
}
214121
% Fischer, Lynch and Paterson (J. ACM 32(2), 1985): the classic FLP impossibility
% result -- no asynchronous consensus protocol can guarantee termination with even
% one faulty process.
@article{214121,
  title = {Impossibility of distributed consensus with one faulty process}, 
  author = {Fischer, Michael J. and Lynch, Nancy A. and Paterson, Michael S.}, 
  journal = {J. ACM}, 
  volume = {32}, 
  number = {2}, 
  year = {1985}, 
  address = {New York, NY, USA}, 
  pages = {374--382}, 
  publisher = {ACM}, 
  abstract = {The consensus problem involves an asynchronous system of processes, some of
        which may be unreliable. The problem is for the reliable processes to agree on a
        binary value. In this paper, it is shown that every protocol for this problem has
        the possibility of nontermination, even with only one faulty process. By way of
        contrast, solutions are known for the synchronous case, the
        {\textquotedblleft}Byzantine Generals{\textquotedblright} problem}, 
  issn = {0004-5411}, 
  doi = {10.1145/3149.214121}, 
  url = {http://portal.acm.org/citation.cfm?id=214121$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pods06_paper01.pdf}, 
  www_section = {Unsorted}, 
}
224068
% Mummert, Ebling and Satyanarayanan (SIGOPS OSR 29(5), 1995): exploiting weak
% network connectivity for mobile file access. No abstract in this record.
@article{224068,
  title = {Exploiting weak connectivity for mobile file access}, 
  author = {Lily B. Mummert and Maria Ebling and Satyanarayanan, Mahadev}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {29}, 
  number = {5}, 
  year = {1995}, 
  address = {New York, NY, USA}, 
  pages = {143--155}, 
  publisher = {ACM}, 
  issn = {0163-5980}, 
  doi = {10.1145/224057.224068}, 
  url = {http://portal.acm.org/citation.cfm?id=224068$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/s15.pdf}, 
  www_section = {Unsorted}, 
}
285258
% Byers, Luby, Mitzenmacher and Rege (SIGCOMM 1998): the digital fountain approach
% to reliable bulk-data multicast using fast erasure codes.
% Fix: month uses the standard unquoted BibTeX macro (sep) instead of the literal
% string {September}, so styles can abbreviate/localize it consistently.
@conference{285258,
  title = {A digital fountain approach to reliable distribution of bulk data}, 
  author = {Byers, John W. and Luby, Michael and Michael Mitzenmacher and Rege, Ashutosh}, 
  booktitle = {SIGCOMM'98: Proceedings of SIGCOMM'98 Conference on Applications,
        Technologies, Architectures, and Protocols for Computer Communication}, 
  organization = {ACM}, 
  year = {1998}, 
  month = sep, 
  address = {Vancouver, Canada}, 
  pages = {56--67}, 
  publisher = {ACM}, 
  abstract = {The proliferation of applications that must reliably distribute bulk data to
        a large number of autonomous clients motivates the design of new multicast and
        broadcast protocols. We describe an ideal, fully scalable protocol for these
        applications that we call a digital fountain. A digital fountain allows any
        number of heterogeneous clients to acquire bulk data with optimal efficiency at
        times of their choosing. Moreover, no feedback channels are needed to ensure
        reliable delivery, even in the face of high loss rates.We develop a protocol that
        closely approximates a digital fountain using a new class of erasure codes that
        for large block sizes are orders of magnitude faster than standard erasure codes.
        We provide performance measurements that demonstrate the feasibility of our
        approach and discuss the design, implementation and performance of an
        experimental system}, 
  www_section = {coding theory, multicast}, 
  isbn = {1-58113-003-1}, 
  doi = {10.1145/285237.285258}, 
  url = {http://portal.acm.org/citation.cfm?id=285258\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.3011.pdf}, 
}
301333
% Jakobsson (PODC 1999): Flash mixing. The generator already flags the missing
% www_section field below; the url field is a placeholder pointing at the
% bibliography site itself -- TODO confirm the real landing page.
% Fix: doi stores the bare DOI (no http://doi.acm.org/ resolver prefix), matching
% every other entry in this file; styles and URL packages add the resolver.
@conference{301333,
  title = {Flash mixing}, 
  author = {Jakobsson, Markus}, 
  booktitle = {PODC '99: Proceedings of the eighteenth annual ACM symposium on Principles
        of distributed computing}, 
  organization = {ACM}, 
  year = {1999}, 
  address = {New York, NY, USA}, 
  pages = {83--89}, 
  publisher = {ACM}, 
  isbn = {1-58113-099-6}, 
  doi = {10.1145/301308.301333}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/flash-mix.pdf}, 
  url = {https://bibliography.gnunet.org}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
313556
% Estrin, Govindan, Heidemann and Kumar (MobiCom 1999): scalable coordination in
% sensor networks via localized algorithms and directed diffusion.
@conference{313556,
  title = {Next century challenges: scalable coordination in sensor networks}, 
  author = {Deborah Estrin and Govindan, Ramesh and Heidemann, John and Kumar, Satish}, 
  booktitle = {MobiCom '99: Proceedings of the 5th annual ACM/IEEE international conference
        on Mobile computing and networking}, 
  organization = {ACM}, 
  year = {1999}, 
  address = {New York, NY, USA}, 
  pages = {263--270}, 
  publisher = {ACM}, 
  abstract = {Networked sensors -- those that coordinate amongst themselves to achieve a
        larger sensing task -- will revolutionize information gathering and processing
        both in urban environments and in inhospitable terrain. The sheer numbers of
        these sensors and the expected dynamics in these environments present unique
        challenges in the design of unattended autonomous sensor networks. These
        challenges lead us to hypothesize that sensor network coordination applications
        may need to be structured differently from traditional network applications. In
        particular, we believe that localized algorithms (in which simple local node
        behavior achieves a desired global objective) may be necessary for sensor network
        coordination. In this paper, we describe localized algorithms, and then discuss
        directed diffusion, a simple communication model for describing localized
        algorithms}, 
  www_section = {sensor networks}, 
  isbn = {1-58113-142-9}, 
  doi = {10.1145/313451.313556}, 
  url = {http://portal.acm.org/citation.cfm?id=313451.313556$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2867.pdf}, 
}
314722
% Luby, Mitzenmacher and Shokrollahi (SODA 1998): And-Or tree evaluation as a
% unified analysis tool for loss-resilient codes, random k-SAT, and matchings.
% NOTE(review): no doi field in this record -- add one if known; do not guess.
@conference{314722,
  title = {Analysis of random processes via And-Or tree evaluation}, 
  author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi}, 
  booktitle = {SODA '98: Proceedings of the ninth annual ACM-SIAM symposium on Discrete
        algorithms}, 
  organization = {Society for Industrial and Applied Mathematics}, 
  year = {1998}, 
  address = {Philadelphia, PA, USA}, 
  pages = {364--373}, 
  publisher = {Society for Industrial and Applied Mathematics}, 
  abstract = {We introduce a new set of probabilistic analysis tools based on the analysis
        of And-Or trees with random inputs. These tools provide a unifying, intuitive,
        and powerful framework for carrying out the analysis of several previously
        studied random processes of interest, including random loss-resilient codes,
        solving random k-SAT formula using the pure literal rule, and the greedy
        algorithm for matchings in random graphs. In addition, these tools allow
        generalizations of these problems not previously analyzed to be analyzed in a
        straightforward manner. We illustrate our methodology on the three problems
        listed above. 1 Introduction We introduce a new set of probabilistic analysis
        tools related to the amplification method introduced by [12] and further
        developed and used in [13, 5]. These tools provide a unifying, intuitive, and
        powerful framework for carrying out the analysis of several previously studied
        random processes of interest, including the random loss-resilient codes
        introduced}, 
  www_section = {And-Or trees, coding theory}, 
  isbn = {0-89871-410-9}, 
  url = {http://portal.acm.org/citation.cfm?id=314722\&dl=GUIDE\&coll=GUIDE\&CFID=102355791\&CFTOKEN=32605420$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.63.2427.pdf}, 
}
319159
% Santry et al. (SIGOPS OSR 33(5), 1999): the Elephant file system, which retains
% all important versions of user files with per-file retention policies.
@article{319159,
  title = {Deciding when to forget in the Elephant file system}, 
  author = {Santry, Douglas S. and Feeley, Michael J. and Hutchinson, Norman C. and Veitch,
        Alistair C. and Carton, Ross W. and Ofir, Jacob}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {33}, 
  number = {5}, 
  year = {1999}, 
  address = {New York, NY, USA}, 
  pages = {110--123}, 
  publisher = {ACM}, 
  abstract = {Modern file systems associate the deletion of a file with the immediate
        release of storage, and file writes with the irrevocable change of file contents.
        We argue that this behavior is a relic of the past, when disk storage was a
        scarce resource. Today, large cheap disks make it possible for the file system to
        protect valuable data from accidental delete or overwrite. This paper describes
        the design, implementation, and performance of the Elephant file system, which
        automatically retains all important versions of user files. Users name previous
        file versions by combining a traditional pathname with a time when the desired
        version of a file or directory existed. Storage in Elephant is managed by the
        system using filegrain user-specified retention policies. This approach contrasts
        with checkpointing file systems such as Plan-9, AFS, and WAFL that periodically
        generate efficient checkpoints of entire file systems and thus restrict retention
        to be guided by a single policy for all files within that file system. Elephant
        is implemented as a new Virtual File System in the FreeBSD kernel}, 
  www_section = {file systems, storage}, 
  issn = {0163-5980}, 
  doi = {10.1145/319344.319159}, 
  url = {http://portal.acm.org/citation.cfm?id=319159$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p110-santry.pdf}, 
}
335325
% Kleinberg (STOC 2000): the small-world phenomenon from an algorithmic
% perspective -- decentralized routing in Watts-Strogatz-style networks.
@conference{335325,
  title = {The small-world phenomenon: an algorithm perspective}, 
  author = {Kleinberg, Jon}, 
  booktitle = {STOC '00: Proceedings of the thirty-second annual ACM symposium on Theory of
        computing}, 
  organization = {ACM}, 
  year = {2000}, 
  address = {New York, NY, USA}, 
  pages = {163--170}, 
  publisher = {ACM}, 
  abstract = {Long a matter of folklore, the {\textquotedblleft}small-world phenomenon
        {\textquotedblright} {\textemdash} the principle that we are all linked by short
        chains of acquaintances {\textemdash} was inaugurated as an area of experimental
        study in the social sciences through the pioneering work of Stanley Milgram in
        the 1960's. This work was among the first to make the phenomenon quantitative,
        allowing people to speak of the {\textquotedblleft}six degrees of separation
        {\textquotedblright} between any two people in the United States. Since then, a
        number of network models have been proposed as frameworks in which to study the
        problem analytically. One of the most refined of these models was formulated in
        recent work of Watts and Strogatz; their framework provided compelling evidence
        that the small-world phenomenon is pervasive in a range of networks arising in
        nature and technology, and a fundamental ingredient in the evolution of the World
        Wide Web. But existing models are insufficient to explain the striking
        algorithmic component of Milgram's original findings: that individuals using
        local information are collectively very effective at actually constructing short
        paths between two points in a social network. Although recently proposed network
        models are rich in short paths, we prove that no decentralized algorithm,
        operating with local information only, can construct short paths in these
        networks with non-negligible probability. We then define an infinite family of
        network models that naturally generalizes the Watts-Strogatz model, and show that
        for one of these models, there is a decentralized algorithm capable of finding
        short paths with high probability. More generally, we provide a strong
        characterization of this family of network models, showing that there is in fact
        a unique model within the family for which decentralized algorithms are
        effective}, 
  www_section = {small-world}, 
  isbn = {1-58113-184-4}, 
  doi = {10.1145/335305.335325}, 
  url = {http://portal.acm.org/citation.cfm?id=335325$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swn.pdf}, 
}
335405
% Liefke and Suciu (SIGMOD Rec. 29(2), 2000): XMill, a compressor for XML data
% combining zlib with datatype-specific and user-defined compressors.
@article{335405,
  title = {XMill: an efficient compressor for XML data}, 
  author = {Liefke, Hartmut and Suciu, Dan}, 
  journal = {SIGMOD Rec}, 
  volume = {29}, 
  number = {2}, 
  year = {2000}, 
  address = {New York, NY, USA}, 
  pages = {153--164}, 
  publisher = {ACM}, 
  abstract = {We describe a tool for compressing XML data, with applications in data
        exchange and archiving, which usually achieves about twice the compression ratio
        of gzip at roughly the same speed. The compressor, called XMill, incorporates and
        combines existing compressors in order to apply them to heterogeneous XML data:
        it uses zlib, the library function for gzip, a collection of datatype specific
        compressors for simple data types, and, possibly, user defined compressors for
        application specific data types}, 
  www_section = {compression}, 
  issn = {0163-5808}, 
  doi = {10.1145/335191.335405}, 
  url = {http://portal.acm.org/citation.cfm?id=335405$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.33.2632.pdf}, 
}
338955
% Dorigo, Di Caro and Gambardella (Artificial Life 5(2), 1999): survey of ant
% algorithms and introduction of the ant colony optimization (ACO) metaheuristic.
@article{338955,
  title = {Ant algorithms for discrete optimization}, 
  author = {Dorigo, Marco and Di Caro, Gianni and Gambardella, Luca M.}, 
  journal = {Artif. Life}, 
  volume = {5}, 
  number = {2}, 
  year = {1999}, 
  address = {Cambridge, MA, USA}, 
  pages = {137--172}, 
  publisher = {MIT Press}, 
  abstract = {This article presents an overview of recent work on ant algorithms, that is,
        algorithms for discrete optimization that took inspiration from the observation
        of ant colonies' foraging behavior, and introduces the ant colony optimization
        (ACO) metaheuristic. In the first part of the article the basic biological
        findings on real ants are reviewed and their artificial counterparts as well as
        the ACO metaheuristic are defined. In the second part of the article a number of
        applications of ACO algorithms to combinatorial optimization and routing in
        communications networks are described. We conclude with a discussion of related
        work and of some of the most important aspects of the ACO metaheuristic}, 
  www_section = {ant colony optimization, metaheuristics, natural computation, swarm
        intelligence}, 
  issn = {1064-5462}, 
  doi = {10.1162/106454699568728}, 
  url = {http://portal.acm.org/citation.cfm?id=338955$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ij_23-alife99.pdf}, 
}
339337
% Chu, Rao and Zhang (SIGMETRICS 2000): the case for End System Multicast and the
% Narada self-organizing overlay protocol.
% Fix: month uses the standard unquoted BibTeX macro (jun) instead of the literal
% string {June}, so styles can abbreviate/localize it consistently.
@conference{339337,
  title = {A case for end system multicast (keynote address)}, 
  author = {Chu, Yang-hua and Rao, Sanjay G. and Zhang, Hui}, 
  booktitle = {SIGMETRICS '00: Proceedings of the 2000 ACM SIGMETRICS international
        conference on Measurement and modeling of computer systems}, 
  organization = {ACM}, 
  year = {2000}, 
  month = jun, 
  address = {Santa Clara, CA}, 
  pages = {1--12}, 
  publisher = {ACM}, 
  abstract = {The conventional wisdom has been that IP is the natural protocol layer for
        implementing multicast related functionality. However, ten years after its
        initial proposal, IP Multicast is still plagued with concerns pertaining to
        scalability, network management, deployment and support for higher layer
        functionality such as error, flow and congestion control. In this paper, we
        explore an alternative architecture for small and sparse groups, where end
        systems implement all multicast related functionality including membership
        management and packet replication. We call such a scheme End System Multicast.
        This shifting of multicast support from routers to end systems has the potential
        to address most problems associated with IP Multicast. However, the key concern
        is the performance penalty associated with such a model. In particular, End
        System Multicast introduces duplicate packets on physical links and incurs larger
        end-to-end delay than IP Multicast. In this paper, we study this question in the
        context of the Narada protocol. In Narada, end systems self-organize into an
        overlay structure using a fully distributed protocol. In addition, Narada
        attempts to optimize the efficiency of the overlay based on end-to-end
        measurements. We present details of Narada and evaluate it using both simulation
        and Internet experiments. Preliminary results are encouraging. In most
        simulations and Internet experiments, the delay and bandwidth penalty are low. We
        believe the potential benefits of repartitioning multicast functionality between
        end systems and routers significantly outweigh the performance penalty incurred}, 
  www_section = {multicast}, 
  isbn = {1-58113-194-1}, 
  doi = {10.1145/339331.339337}, 
  url = {http://portal.acm.org/citation.cfm?id=339337$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac-2001.pdf}, 
}
339345
% Bolosky, Douceur, Ely and Theimer (SIGMETRICS PER 28(1), 2000): feasibility study
% for a serverless distributed file system on existing desktop PCs (Farsite lineage).
@article{339345,
  title = {Feasibility of a serverless distributed file system deployed on an existing set
        of desktop PCs}, 
  author = {Bolosky, William J. and John R. Douceur and Ely, David and Marvin Theimer}, 
  journal = {SIGMETRICS Performance Evaluation Review}, 
  volume = {28}, 
  number = {1}, 
  year = {2000}, 
  address = {New York, NY, USA}, 
  pages = {34--43}, 
  publisher = {ACM}, 
  abstract = {We consider an architecture for a serverless distributed file system that
        does not assume mutual trust among the client computers. The system provides
        security, availability, and reliability by distributing multiple encrypted
        replicas of each file among the client machines. To assess the feasibility of
        deploying this system on an existing desktop infrastructure, we measure and
        analyze a large set of client machines in a commercial environment. In
        particular, we measure and report results on disk usage and content; file
        activity; and machine uptimes, lifetimes, and loads. We conclude that the
        measured desktop infrastructure would passably support our proposed system,
        providing availability on the order of one unfilled file request per user per
        thousand days}, 
  www_section = {analytical modeling, availability, feasibility analysis, personal computer
        usage data, reliability, serverless distributed file system architecture, trust,
        workload characterization}, 
  issn = {0163-5999}, 
  doi = {10.1145/345063.339345}, 
  url = {http://portal.acm.org/citation.cfm?id=345063.339345$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.4280_0.pdf}, 
}
357176
% Lamport, Shostak and Pease (ACM TOPLAS 4(3), 1982): The Byzantine Generals
% Problem. No abstract in this record.
@article{357176,
  title = {The Byzantine Generals Problem}, 
  author = {Lamport, Leslie and Shostak, Robert and Pease, Marshall}, 
  journal = {ACM Trans. Program. Lang. Syst}, 
  volume = {4}, 
  number = {3}, 
  year = {1982}, 
  address = {New York, NY, USA}, 
  pages = {382--401}, 
  publisher = {ACM}, 
  issn = {0164-0925}, 
  doi = {10.1145/357172.357176}, 
  url = {http://portal.acm.org/citation.cfm?id=357176$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/byz.pdf}, 
  www_section = {Unsorted}, 
}
368907
% Ershov (Commun. ACM 1(8), 1958): on programming of arithmetic operations.
% No abstract in this record.
@article{368907,
  title = {On programming of arithmetic operations}, 
  author = {Andrey Petrovych Ershov}, 
  journal = {Commun. ACM}, 
  volume = {1}, 
  number = {8}, 
  year = {1958}, 
  address = {New York, NY, USA}, 
  pages = {3--6}, 
  publisher = {ACM}, 
  issn = {0001-0782}, 
  doi = {10.1145/368892.368907}, 
  url = {http://portal.acm.org/citation.cfm?id=368907$\#$}, 
  www_section = {Unsorted}, 
}
37517
% Birrell, Jones and Wobber (SIGOPS OSR 21(5), 1987): a simple and efficient
% small-database technique (in-memory structure + log + checkpoint).
@article{37517,
  title = {A simple and efficient implementation of a small database}, 
  author = {Andrew D. Birrell and Michael B. Jones and Edward P. Wobber}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {21}, 
  number = {5}, 
  year = {1987}, 
  address = {New York, NY, USA}, 
  pages = {149--154}, 
  publisher = {ACM}, 
  abstract = {This paper describes a technique for implementing the sort of small databases
        that frequently occur in the design of operating systems and distributed systems.
        We take advantage of the existence of very large virtual memories, and quite
        large real memories, to make the technique feasible. We maintain the database as
        a strongly typed data structure in virtual memory, record updates incrementally
        on disk in a log and occasionally make a checkpoint of the entire database. We
        recover from crashes by restoring the database from an old checkpoint then
        replaying the log. We use existing packages to convert between strongly typed
        data objects and their disk representations, and to communicate strongly typed
        data across the network (using remote procedure calls). Our memory is managed
        entirely by a general purpose allocator and garbage collector. This scheme has
        been used to implement a name server for a distributed system. The resulting
        implementation has the desirable property of being simultaneously simple,
        efficient and reliable}, 
  issn = {0163-5980}, 
  doi = {10.1145/37499.37517}, 
  url = {http://portal.acm.org/citation.cfm?id=37499.37517$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/024-DatabasesPaper.pdf},
  www_section = {Unsorted}, 
}
378347
% Zhuang, Zhao, Joseph, Katz and Kubiatowicz (NOSSDAV 2001): Bayeux, scalable
% fault-tolerant application-level multicast built on Tapestry.
@conference{378347,
  title = {Bayeux: an architecture for scalable and fault-tolerant wide-area data
        dissemination}, 
  author = {Shelley Zhuang and Ben Y. Zhao and Anthony D. Joseph and Katz, Randy H. and
        John Kubiatowicz}, 
  booktitle = {NOSSDAV '01: Proceedings of the 11th international workshop on Network and
        operating systems support for digital audio and video}, 
  organization = {ACM}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {11--20}, 
  publisher = {ACM}, 
  abstract = {The demand for streaming multimedia applications is growing at an incr edible
        rate. In this paper, we propose Bayeux, an efficient application-level multicast
        system that scales to arbitrarily large receiver groups while tolerating failures
        in routers and network links. Bayeux also includes specific mechanisms for
        load-balancing across replicate root nodes and more efficient bandwidth
        consumption. Our simulation results indicate that Bayeux maintains these
        properties while keeping transmission overhead low. To achieve these properties,
        Bayeux leverages the architecture of Tapestry, a fault-tolerant, wide-area
        overlay routing and location network}, 
  www_section = {fault-tolerance, load balancing}, 
  isbn = {1-58113-370-7}, 
  doi = {10.1145/378344.378347}, 
  url = {http://portal.acm.org/citation.cfm?id=378347$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bayeux.pdf}, 
}
379239
% Kubiatowicz et al. (ASPLOS-IX, 2000): OceanStore, global-scale persistent storage
% on untrusted servers. The generator already flags the missing www_section below.
@conference{379239,
  title = {OceanStore: an architecture for global-scale persistent storage}, 
  author = {John Kubiatowicz and Bindel, David and Chen, Yan and Czerwinski, Steven and
        Eaton, Patrick and Geels, Dennis and Gummadi, Ramakrishna and Rhea, Sean C. and
        Weatherspoon, Hakim and Wells, Chris and Ben Y. Zhao}, 
  booktitle = {ASPLOS-IX: Proceedings of the ninth international conference on
        Architectural support for programming languages and operating systems}, 
  organization = {ACM}, 
  year = {2000}, 
  address = {New York, NY, USA}, 
  pages = {190--201}, 
  publisher = {ACM}, 
  abstract = {OceanStore is a utility infrastructure designed to span the globe and provide
        continuous access to persistent information. Since this infrastructure is
        comprised of untrusted servers, data is protected through redundancy and
        cryptographic techniques. To improve performance, data is allowed to be cached
        anywhere, anytime. Additionally, monitoring of usage patterns allows adaptation
        to regional outages and denial of service attacks; monitoring also enhances
        performance through pro-active movement of data. A prototype implementation is
        currently under development}, 
  isbn = {1-58113-317-0}, 
  doi = {10.1145/378993.379239}, 
  url = {http://doi.acm.org/10.1145/378993.379239}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p190-kubi.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
383072
% Ratnasamy, Francis, Handley, Karp and Shenker (SIGCOMM 2001): the scalable
% Content-Addressable Network (CAN) distributed hash table.
@conference{383072,
  title = {A scalable content-addressable network}, 
  author = {Sylvia Paul Ratnasamy and Paul Francis and Handley, Mark and Richard Karp and S
        Shenker}, 
  booktitle = {SIGCOMM '01: Proceedings of the 2001 conference on Applications,
        technologies, architectures, and protocols for computer communications}, 
  organization = {ACM}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {161--172}, 
  publisher = {ACM}, 
  abstract = {Hash tables--which map "keys" onto "values"--are an essential building block
        in modern software systems. We believe a similar functionality would be equally
        valuable to large distributed systems. In this paper, we introduce the concept of
        a Content-Addressable Network (CAN) as a distributed infrastructure that provides
        hash table-like functionality on Internet-like scales. The CAN is scalable,
        fault-tolerant and completely self-organizing, and we demonstrate its
        scalability, robustness and low-latency properties through simulation}, 
  www_section = {CAN, fault-tolerance, robustness}, 
  isbn = {1-58113-411-8}, 
  doi = {10.1145/383059.383072}, 
  url = {http://portal.acm.org/citation.cfm?id=383072$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.8434.pdf}, 
}
4202
% Tichy (Softw. Pract. Exper. 15(7), 1985): RCS, a system for version control.
% Fix: the ampersand in the publisher name is escaped as \& -- a raw & is a LaTeX
% alignment character and breaks typesetting of the bibliography.
@article{4202,
  title = {RCS---a system for version control}, 
  author = {Tichy, Walter F.}, 
  journal = {Softw. Pract. Exper}, 
  volume = {15}, 
  number = {7}, 
  year = {1985}, 
  address = {New York, NY, USA}, 
  pages = {637--654}, 
  publisher = {John Wiley \& Sons, Inc}, 
  abstract = {An important problem in program development and maintenance is version
        control, i.e., the task of keeping a software system consisting of many versions
        and configurations well organized. The Revision Control System (RCS) is a
        software tool that assists with that task. RCS manages revisions of text
        documents, in particular source programs, documentation, and test data. It
        automates the storing, retrieval, logging and identification of revisions, and it
        provides selection mechanisms for composing configurations. This paper introduces
        basic version control concepts and discusses the practice of version control
        using RCS. For conserving space, RCS stores deltas, i.e., differences between
        successive revisions. Several delta storage methods are discussed. Usage
        statistics show that RCS's delta storage method is space and time efficient. The
        paper concludes with a detailed survey of version control tools}, 
  www_section = {version control}, 
  issn = {0038-0644}, 
  doi = {10.1002/spe.4380150703}, 
  url = {http://portal.acm.org/citation.cfm?id=4202$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.3350_0.pdf}, 
}
501437
% Hubaux, Buttyan and Capkun (MobiHoc 2001): overview of security problems in
% mobile ad hoc networks and a fully decentralized protection mechanism.
@conference{501437,
  title = {The quest for security in mobile ad hoc networks}, 
  author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun}, 
  booktitle = {MobiHoc '01: Proceedings of the 2nd ACM international symposium on Mobile ad
        hoc networking \& computing}, 
  organization = {ACM}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {146--155}, 
  publisher = {ACM}, 
  abstract = {So far, research on mobile ad hoc networks has been forcused primarily on
        routing issues. Security, on the other hand, has been given a lower priority.
        This paper provides an overview of security problems for mobile ad hoc networks,
        distinguishing the threats on basic mechanisms and on security mechanisms. It
        then describes our solution to protect the security mechanisms. The original
        features of this solution include that (i) it is fully decentralized and (ii) all
        nodes are assigned equivalent roles}, 
  www_section = {ad-hoc networks, routing}, 
  isbn = {1-58113-428-2}, 
  doi = {10.1145/501436.501437}, 
  url = {http://portal.acm.org/citation.cfm?id=501437$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Quest01.pdf}, 
}
502048
% Andersen, Balakrishnan, Kaashoek and Morris (SIGOPS OSR 35(5), 2001): Resilient
% Overlay Networks (RON) -- fast detection of and recovery from Internet path outages.
@article{502048,
  title = {Resilient overlay networks}, 
  author = {Andersen, David and Hari Balakrishnan and Frans M. Kaashoek and Robert Morris}, 
  journal = {SIGOPS Oper. Syst. Rev}, 
  volume = {35}, 
  number = {5}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {131--145}, 
  publisher = {ACM}, 
  abstract = {A Resilient Overlay Network (RON) is an architecture that allows distributed
        Internet applications to detect and recover from path outages and periods of
        degraded performance within several seconds, improving over today's wide-area
        routing protocols that take at least several minutes to recover. A RON is an
        application-layer overlay on top of the existing Internet routing substrate. The
        RON nodes monitor the functioning and quality of the Internet paths among
        themselves, and use this information to decide whether to route packets directly
        over the Internet or by way of other RON nodes, optimizing application-specific
        routing metrics.Results from two sets of measurements of a working RON deployed
        at sites scattered across the Internet demonstrate the benefits of our
        architecture. For instance, over a 64-hour sampling period in March 2001 across a
        twelve-node RON, there were 32 significant outages, each lasting over thirty
        minutes, over the 132 measured paths. RON's routing mechanism was able to detect,
        recover, and route around all of them, in less than twenty seconds on average,
        showing that its methods for fault detection and recovery work well at
        discovering alternate paths in the Internet. Furthermore, RON was able to improve
        the loss rate, latency, or throughput perceived by data transfers; for example,
        about 5\% of the transfers doubled their TCP throughput and 5\% of our transfers
        saw their loss probability reduced by 0.05. We found that forwarding packets via
        at most one intermediate RON node is sufficient to overcome faults and improve
        performance in most cases. These improvements, particularly in the area of fault
        detection and recovery, demonstrate the benefits of moving some of the control
        over routing into the hands of end-systems}, 
  www_section = {resilient overlay network}, 
  issn = {0163-5980}, 
  doi = {10.1145/502059.502048}, 
  url = {http://portal.acm.org/citation.cfm?id=502059.502048$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ron-sosp2001.pdf}, 
}
502052
@conference{502052,
  title = {A low-bandwidth network file system}, 
  author = {Muthitacharoen, Athicha and Chen, Benjie and Mazi{\`e}res, David}, 
  booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems
        principles}, 
  organization = {ACM}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {174--187}, 
  publisher = {ACM}, 
  abstract = {Users rarely consider running network file systems over slow or wide-area
        networks, as the performance would be unacceptable and the bandwidth consumption
        too high. Nonetheless, efficient remote file access would often be desirable over
        such networks---particularly when high latency makes remote login sessions
        unresponsive. Rather than run interactive programs such as editors remotely,
        users could run the programs locally and manipulate remote files through the file
        system. To do so, however, would require a network file system that consumes less
        bandwidth than most current file systems.This paper presents LBFS, a network file
        system designed for low-bandwidth networks. LBFS exploits similarities between
        files or versions of the same file to save bandwidth. It avoids sending data over
        the network when the same data can already be found in the server's file system
        or the client's cache. Using this technique in conjunction with conventional
        compression and caching, LBFS consumes over an order of magnitude less bandwidth
        than traditional network file systems on common workloads}, 
  www_section = {file systems, workload characterization}, 
  isbn = {1-58113-389-8}, 
  doi = {10.1145/502034.502052}, 
  url = {http://portal.acm.org/citation.cfm?id=502052$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lbfs.pdf}, 
}
502054
@conference{502054,
  title = {Wide-area cooperative storage with {CFS}}, 
  author = {Dabek, Frank and Kaashoek, Frans M. and Karger, David and Morris, Robert and
        Stoica, Ion}, 
  booktitle = {SOSP '01: Proceedings of the eighteenth ACM symposium on Operating systems
        principles}, 
  organization = {ACM}, 
  year = {2001}, 
  address = {New York, NY, USA}, 
  pages = {202--215}, 
  publisher = {ACM}, 
  abstract = {The Cooperative File System (CFS) is a new peer-to-peer read-only storage
        system that provides provable guarantees for the efficiency, robustness, and
        load-balance of file storage and retrieval. CFS does this with a completely
        decentralized architecture that can scale to large systems. CFS servers provide a
        distributed hash table (DHash) for block storage. CFS clients interpret DHash
        blocks as a file system. DHash distributes and caches blocks at a fine
        granularity to achieve load balance, uses replication for robustness, and
        decreases latency with server selection. DHash finds blocks using the Chord
        location protocol, which operates in time logarithmic in the number of
        servers.CFS is implemented using the SFS file system toolkit and runs on Linux,
        OpenBSD, and FreeBSD. Experience on a globally deployed prototype shows that CFS
        delivers data to clients as fast as FTP. Controlled tests show that CFS is
        scalable: with 4,096 servers, looking up a block of data involves contacting only
        seven servers. The tests also demonstrate nearly perfect robustness and
        unimpaired performance even when as many as half the servers fail}, 
  www_section = {P2P}, 
  isbn = {1-58113-389-8}, 
  doi = {10.1145/502034.502054}, 
  url = {http://portal.acm.org/citation.cfm?id=502054$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cfs_sosp.pdf}, 
}
511496
@conference{511496,
  title = {Choosing reputable servents in a {P2P} network}, 
  author = {Cornelli, Fabrizio and Damiani, Ernesto and De Capitani di Vimercati, Sabrina
        and Paraboschi, Stefano and Samarati, Pierangela}, 
  booktitle = {WWW '02: Proceedings of the 11th international conference on World Wide
        Web}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {376--386}, 
  publisher = {ACM}, 
  www_section = {credibility, polling protocol, reputation}, 
  isbn = {1-58113-449-5}, 
  doi = {10.1145/511446.511496}, 
  url = {http://portal.acm.org/citation.cfm?id=511496$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/samarati.pdf}, 
}
513828
@conference{513828,
  title = {Performance analysis of the {CONFIDANT} protocol}, 
  author = {Buchegger, Sonja and Le Boudec, Jean-Yves}, 
  booktitle = {MobiHoc '02: Proceedings of the 3rd ACM international symposium on Mobile ad
        hoc networking \& computing}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {226--236}, 
  publisher = {ACM}, 
  abstract = {Mobile ad-hoc networking works properly only if the participating nodes
        cooperate in routing and forwarding. However,it may be advantageous for
        individual nodes not to cooperate. We propose a protocol, called CONFIDANT, for
        making misbehavior unattractive; it is based on selective altruism and
        utilitarianism. It aims at detecting and isolating misbehaving nodes, thus making
        it unattractive to deny cooperation. Trust relationships and routing decisions
        are based on experienced, observed, or reported routing and forwarding behavior
        of other nodes. The detailed implementation of CONFIDANT in this paper assumes
        that the network layer is based on the Dynamic Source Routing (DSR) protocol. We
        present a performance analysis of DSR fortified by CONFIDANT and compare it to
        regular defenseless DSR. It shows that a network with CONFIDANT and up to 60\% of
        misbehaving nodes behaves almost as well as a benign network, in sharp contrast
        to a defenseless network. All simulations have been implemented and performed in
        GloMoSim}, 
  www_section = {cooperation, fairness, mobile Ad-hoc networks, reputation, robustness,
        routing, trust}, 
  isbn = {1-58113-501-7}, 
  doi = {10.1145/513800.513828}, 
  url = {http://portal.acm.org/citation.cfm?id=513828$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BucheggerL02.pdf}, 
}
514164
@conference{514164,
  title = {Enforcing service availability in mobile ad-hoc {WANs}}, 
  author = {Butty{\'a}n, Levente and Hubaux, Jean-Pierre}, 
  booktitle = {MobiHoc '00: Proceedings of the 1st ACM international symposium on Mobile ad
        hoc networking \& computing}, 
  organization = {IEEE Press}, 
  year = {2000}, 
  address = {Piscataway, NJ, USA}, 
  pages = {87--96}, 
  publisher = {IEEE Press}, 
  abstract = {In this paper, we address the problem of service availability in mobile
        ad-hoc WANs. We present a secure mechanism to stimulate end users to keep their
        devices turned on, to refrain from overloading the network, and to thwart
        tampering aimed at converting the device into a "selfish" one. Our solution is
        based on the application of a tamper resistant security module in each device and
        cryptographic protection of messages}, 
  www_section = {ad-hoc networks, cryptography}, 
  isbn = {0-7803-6534-8}, 
  url = {http://portal.acm.org/citation.cfm?id=514164}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.84.5715.pdf}, 
}
5328076
@conference{5328076,
  title = {Performance Evaluation of On-Demand Multipath Distance Vector Routing Protocol
        under Different Traffic Models}, 
  author = {Malarkodi, B. and Rakesh, P. and Venkataramani, B.}, 
  booktitle = {International Conference on Advances in Recent Technologies in Communication
        and Computing, 2009. ARTCom '09}, 
  year = {2009}, 
  month = oct, 
  pages = {77--80}, 
  abstract = {Traffic models are the heart of any performance evaluation of
        telecommunication networks. Understanding the nature of traffic in high speed,
        high bandwidth communication system is essential for effective operation and
        performance evaluation of the networks. Many routing protocols reported in the
        literature for Mobile ad hoc networks(MANETS) have been primarily designed and
        analyzed under the assumption of CBR traffic models, which is unable to capture
        the statistical characteristics of the actual traffic. It is necessary to
        evaluate the performance properties of MANETs in the context of more realistic
        traffic models. In an effort towards this end, this paper evaluates the
        performance of adhoc on demand multipath distance vector (AOMDV) routing protocol
        in the presence of poisson and bursty self similar traffic and compares them with
        that of CBR traffic. Different metrics are considered in analyzing the
        performance of routing protocol including packet delivery ratio, throughput and
        end to end delay. Our simulation results indicate that the packet delivery
        fraction and throughput in AOMDV is increased in the presence of self similar
        traffic compared to other traffic. Moreover, it is observed that the end to end
        delay in the presence of self similar traffic is lesser than that of CBR and
        higher than that of poisson traffic}, 
  www_section = {ad-hoc networks, AOMDV, distance vector, multi-path, performance}, 
  doi = {10.1109/ARTCom.2009.31}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/multipath-dv-perf.pdf}, 
}
538134
@book{538134,
  title = {Capability-Based Computer Systems}, 
  author = {Levy, Henry M.}, 
  year = {1984}, 
  address = {Newton, MA, USA}, 
  publisher = {Butterworth-Heinemann}, 
  isbn = {0932376223}, 
  url = {http://portal.acm.org/citation.cfm?id=538134$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Preface.pdf}, 
  www_section = {Unsorted}, 
}
558412
@book{558412,
  title = {Peer-to-Peer: Harnessing the Power of Disruptive Technologies}, 
  author = {Oram, Andy}, 
  organization = {O'Reilly \& Associates, Inc}, 
  year = {2001}, 
  address = {Sebastopol, CA, USA}, 
  editor = {Oram, Andy}, 
  publisher = {O'Reilly \& Associates, Inc}, 
  abstract = {Upstart software projects Napster, Gnutella, and Freenet have dominated
        newspaper headlines, challenging traditional approaches to content distribution
        with their revolutionary use of peer-to-peer file-sharing technologies. Reporters
        try to sort out the ramifications of seemingly ungoverned peer-to-peer networks.
        Lawyers, business leaders, and social commentators debate the virtues and evils
        of these bold new distributed systems. But what's really behind such disruptive
        technologies -- the breakthrough innovations that have rocked the music and media
        worlds? And what lies ahead? In this book, key peer-to-peer pioneers take us
        beyond the headlines and hype and show how the technology is changing the way we
        communicate and exchange information. Those working to advance peer-to-peer as a
        technology, a business opportunity, and an investment offer their insights into
        how the technology has evolved and where it's going. They explore the problems
        they've faced, the solutions they've discovered, the lessons they've learned, and
        their goals for the future of computer networking. Until now, Internet
        communities have been limited by the flat interactive qualities of email and
        network newsgroups, where people can exchange recommendations and ideas but have
        great difficulty commenting on one another's postings, structuring information,
        performing searches, and creating summaries. Peer-to-peer challenges the
        traditional authority of the client/server model, allowing shared information to
        reside instead with producers and users. Peer-to-peer networks empower users to
        collaborate on producing and consuming information, adding to it, commenting on
        it, and building communities around it. This compilation represents the collected
        wisdom of today's peer-to-peer luminaries. It includes contributions from
        Gnutella's Gene Kan, Freenet's Brandon Wiley, Jabber's Jeremie Miller, and many
        others -- plus serious discussions of topics ranging from accountability and
        trust to security and performance. Fraught with questions and promise,
        peer-to-peer is sure to remain on the computer industry's center stage for years
        to come}, 
  isbn = {059600110X}, 
  url = {http://portal.acm.org/citation.cfm?id=558412$\#$}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
567178
@article{567178,
  author = {Mens, Tom}, 
  title = {A State-of-the-Art Survey on Software Merging}, 
  journal = {IEEE Trans. Softw. Eng}, 
  year = {2002}, 
  volume = {28}, 
  number = {5}, 
  pages = {449--462}, 
  publisher = {IEEE Press}, 
  address = {Piscataway, NJ, USA}, 
  abstract = {Software merging is an essential aspect of the maintenance and evolution of
        large-scale software systems. This paper provides a comprehensive survey and
        analysis of available merge approaches. Over the years, a wide variety of
        different merge techniques has been proposed. While initial techniques were
        purely based on textual merging, more powerful approaches also take the syntax
        and semantics of the software into account. There is a tendency towards
        operation-based merging because of its increased expressiveness. Another tendency
        is to try to define merge techniques that are as general, accurate, scalable, and
        customizable as possible, so that they can be used in any phase in the software
        life-cycle and detect as many conflicts as possible. After comparing the possible
        merge techniques, we suggest a number of important open problems and future
        research directions}, 
  www_section = {conflict detection, large-scale software development, merge conflicts,
        software merging}, 
  issn = {0098-5589}, 
  doi = {10.1109/TSE.2002.1000449}, 
  url = {http://portal.acm.org/citation.cfm?id=567178$\#$}, 
}
568525
@article{568525,
  title = {A survey of rollback-recovery protocols in message-passing systems}, 
  author = {Elnozahy, Mootaz and Alvisi, Lorenzo and Wang, Yi-Min and Johnson, David B.}, 
  journal = {ACM Comput. Surv}, 
  volume = {34}, 
  number = {3}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {375--408}, 
  publisher = {ACM}, 
  abstract = {This survey covers rollback-recovery techniques that do not require special
        language constructs. In the first part of the survey we classify
        rollback-recovery protocols into checkpoint-based and log-based. Checkpoint-based
        protocols rely solely on checkpointing for system state restoration.
        Checkpointing can be coordinated, uncoordinated, or communication-induced.
        Log-based protocols combine checkpointing with logging of nondeterministic
        events, encoded in tuples called determinants. Depending on how determinants are
        logged, log-based protocols can be pessimistic, optimistic, or causal. Throughout
        the survey, we highlight the research issues that are at the core of
        rollback-recovery and present the solutions that currently address them. We also
        compare the performance of different rollback-recovery protocols with respect to
        a series of desirable properties and discuss the issues that arise in the
        practical implementations of these protocols}, 
  www_section = {message logging, rollback-recovery}, 
  issn = {0360-0300}, 
  doi = {10.1145/568522.568525}, 
  url = {http://portal.acm.org/citation.cfm?id=568522.568525$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CMU-CS-99-148.pdf}, 
}
571638
@article{571638,
  title = {{COCA}: A secure distributed online certification authority}, 
  author = {Zhou, Lidong and Schneider, Fred B. and Van Renesse, Robbert}, 
  journal = {ACM Trans. Comput. Syst}, 
  volume = {20}, 
  number = {4}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {329--368}, 
  publisher = {ACM}, 
  abstract = {COCA is a fault-tolerant and secure online certification authority that has
        been built and deployed both in a local area network and in the Internet.
        Extremely weak assumptions characterize environments in which COCA's protocols
        execute correctly: no assumption is made about execution speed and message
        delivery delays; channels are expected to exhibit only intermittent reliability;
        and with 3t + 1 COCA servers up to t may be faulty or compromised. COCA is the
        first system to integrate a Byzantine quorum system (used to achieve
        availability) with proactive recovery (used to defend against mobile adversaries
        which attack, compromise, and control one replica for a limited period of time
        before moving on to another). In addition to tackling problems associated with
        combining fault-tolerance and security, new proactive recovery protocols had to
        be developed. Experimental results give a quantitative evaluation for the cost
        and effectiveness of the protocols}, 
  www_section = {byzantine fault tolerance, certification authority, denial-of-service,
        proactive secret-sharing, public key cryptography, threshold cryptography}, 
  issn = {0734-2071}, 
  doi = {10.1145/571637.571638}, 
  url = {http://portal.acm.org/citation.cfm?id=571638$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cocaTOCS.pdf}, 
}
571857
@conference{571857,
  title = {{Viceroy}: a scalable and dynamic emulation of the butterfly}, 
  author = {Malkhi, Dahlia and Naor, Moni and Ratajczak, David}, 
  booktitle = {PODC '02: Proceedings of the twenty-first annual symposium on Principles of
        distributed computing}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {183--192}, 
  publisher = {ACM}, 
  abstract = {We propose a family of constant-degree routing networks of logarithmic
        diameter, with the additional property that the addition or removal of a node to
        the network requires no global coordination, only a constant number of linkage
        changes in expectation, and a logarithmic number with high probability. Our
        randomized construction improves upon existing solutions, such as balanced search
        trees, by ensuring that the congestion of the network is always within a
        logarithmic factor of the optimum with high probability. Our construction derives
        from recent advances in the study of peer-to-peer lookup networks, where rapid
        changes require efficient and distributed maintenance, and where the lookup
        efficiency is impacted both by the lengths of paths to requested data and the
        presence or elimination of bottlenecks in the network}, 
  www_section = {P2P}, 
  isbn = {1-58113-485-1}, 
  doi = {10.1145/571825.571857}, 
  url = {http://portal.acm.org/citation.cfm?id=571857$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/viceroy.pdf}, 
}
581193
@article{581193,
  title = {Modelling with Generalized Stochastic {Petri} Nets}, 
  author = {Ajmone Marsan, Marco and Balbo, Gianfranco and Conte, Gianni and Donatelli,
        Susanna and Franceschinis, Giuliana}, 
  journal = {SIGMETRICS Perform. Eval. Rev}, 
  volume = {26}, 
  number = {2}, 
  year = {1998}, 
  address = {New York, NY, USA}, 
  pages = {0--2}, 
  publisher = {ACM}, 
  issn = {0163-5999}, 
  doi = {10.1145/288197.581193}, 
  url = {http://portal.acm.org/citation.cfm?id=581193$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.83.6433.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
586136
@conference{586136,
  title = {Query-flood {DoS} attacks in gnutella}, 
  author = {Daswani, Neil and Garcia-Molina, Hector}, 
  booktitle = {CCS '02: Proceedings of the 9th ACM conference on Computer and
        communications security}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {181--192}, 
  publisher = {ACM}, 
  abstract = {We describe a simple but effective traffic model that can be used to
        understand the effects of denial-of-service (DoS) attacks based on query floods
        in Gnutella networks. We run simulations based on the model to analyze how
        different choices of network topology and application level load balancing
        policies can minimize the effect of these types of DoS attacks. In addition, we
        also study how damage caused by query floods is distributed throughout the
        network, and how application-level policies can localize the damage}, 
  www_section = {denial-of-service, P2P}, 
  isbn = {1-58113-612-9}, 
  doi = {10.1145/586110.586136}, 
  url = {http://portal.acm.org/citation.cfm?id=586110.586136$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p115-daswani_0.pdf}, 
}
605408
@article{605408,
  title = {Energy-efficient computing for wildlife tracking: design tradeoffs and early
        experiences with {ZebraNet}}, 
  author = {Juang, Philo and Oki, Hidekazu and Wang, Yong and Martonosi, Margaret and Peh,
        Li Shiuan and Rubenstein, Daniel}, 
  journal = {SIGARCH Comput. Archit. News}, 
  volume = {30}, 
  number = {5}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {96--107}, 
  publisher = {ACM}, 
  abstract = {Over the past decade, mobile computing and wireless communication have become
        increasingly important drivers of many new computing applications. The field of
        wireless sensor networks particularly focuses on applications involving
        autonomous use of compute, sensing, and wireless communication devices for both
        scientific and commercial purposes. This paper examines the research decisions
        and design tradeoffs that arise when applying wireless peer-to-peer networking
        techniques in a mobile sensor network designed to support wildlife tracking for
        biology research.The ZebraNet system includes custom tracking collars (nodes)
        carried by animals under study across a large, wild area; the collars operate as
        a peer-to-peer network to deliver logged data back to researchers. The collars
        include global positioning system (GPS), Flash memory, wireless transceivers, and
        a small CPU; essentially each node is a small, wireless computing device. Since
        there is no cellular service or broadcast communication covering the region where
        animals are studied, ad hoc, peer-to-peer routing is needed. Although numerous ad
        hoc protocols exist, additional challenges arise because the researchers
        themselves are mobile and thus there is no fixed base station towards which to
        aim data. Overall, our goal is to use the least energy, storage, and other
        resources necessary to maintain a reliable system with a very high
        {\textquoteleft}data homing' success rate. We plan to deploy a 30-node ZebraNet
        system at the Mpala Research Centre in central Kenya. More broadly, we believe
        that the domain-centric protocols and energy tradeoffs presented here for
        ZebraNet will have general applicability in other wireless and sensor
        applications}, 
  issn = {0163-5964}, 
  doi = {10.1145/635506.605408}, 
  url = {http://portal.acm.org/citation.cfm?id=635506.605408$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/asplos-x_annot.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
627372
@inproceedings{627372,
  title = {{SURF-2}: A program for dependability evaluation of complex hardware and software
        systems}, 
  author = {Beounes, C. and Aguera, M. and Arlat, J. and Bachmann, S. and Bourdeau, C. and
        Doucet, J. and Kanoun, K. and Laprie, J.-C. and Metge, S. and {Moreira de Souza},
        J. and Powell, D. and Spiesser, P.}, 
  booktitle = {Proceedings of FTCS-23 The Twenty-Third International Symposium on
        Fault-Tolerant Computing}, 
  year = {1993}, 
  month = jun, 
  pages = {668--673}, 
  abstract = {SURF-2, a software tool for evaluating system dependability, is described. It
        is especially designed for an evaluation-based system design approach in which
        multiple design solutions need to be compared from the dependability viewpoint.
        System behavior may be modeled either by Markov chains or by generalized
        stochastic Petri nets. The tool supports the evaluation of different measures of
        dependability, including pointwise measures, asymptotic measures, mean sojourn
        times and, by superposing a reward structure on the behavior model, reward
        measures such as expected performance or cost}, 
  www_section = {software reliability, system behaviour, SURF-2, dependability evaluation,
        complex hardware and software systems, software tool, system dependability,
        evaluation-based system design approach, multiple design solutions, Markov
        chains, generalized stochastic Petri nets, measures of dependability, pointwise
        measures, asymptotic measures, mean sojourn times, reward structure, reward
        measures, performance, Hardware, Software systems, Stochastic systems, Petri
        nets, Software tools, Process design, Stochastic processes, Humans, Costs,
        Performance evaluation}, 
  doi = {10.1109/FTCS.1993.627372}, 
  issn = {0731-3071}, 
  isbn = {0-8186-3680-7}, 
  url = {https://ieeexplore.ieee.org/document/627372/authors#authors}, 
}
633027
@conference{633027,
  title = {Understanding {BGP} misconfiguration}, 
  author = {Mahajan, Ratul and Wetherall, David and Anderson, Thomas}, 
  booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications,
        technologies, architectures, and protocols for computer communications}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {3--16}, 
  publisher = {ACM}, 
  abstract = {It is well-known that simple, accidental BGP configuration errors can disrupt
        Internet connectivity. Yet little is known about the frequency of
        misconfiguration or its causes, except for the few spectacular incidents of
        widespread outages. In this paper, we present the first quantitative study of BGP
        misconfiguration. Over a three week period, we analyzed routing table
        advertisements from 23 vantage points across the Internet backbone to detect
        incidents of misconfiguration. For each incident we polled the ISP operators
        involved to verify whether it was a misconfiguration, and to learn the cause of
        the incident. We also actively probed the Internet to determine the impact of
        misconfiguration on connectivity.Surprisingly, we find that configuration errors
        are pervasive, with 200-1200 prefixes (0.2-1.0\% of the BGP table size) suffering
        from misconfiguration each day. Close to 3 in 4 of all new prefix advertisements
        were results of misconfiguration. Fortunately, the connectivity seen by end users
        is surprisingly robust to misconfigurations. While misconfigurations can
        substantially increase the update load on routers, only one in twenty five
        affects connectivity. While the causes of misconfiguration are diverse, we argue
        that most could be prevented through better router design}, 
  www_section = {border gateway protocol}, 
  isbn = {1-58113-570-X}, 
  doi = {10.1145/633025.633027}, 
  url = {http://portal.acm.org/citation.cfm?id=633027$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bgpmisconfig.pdf}, 
}
633045
@conference{633045,
  title = {Scalable application layer multicast}, 
  author = {Banerjee, Suman and Bhattacharjee, Bobby and Kommareddy, Christopher}, 
  booktitle = {SIGCOMM '02: Proceedings of the 2002 conference on Applications,
        technologies, architectures, and protocols for computer communications}, 
  organization = {ACM}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {205--217}, 
  publisher = {ACM}, 
  abstract = {We describe a new scalable application-layer multicast protocol, specifically
        designed for low-bandwidth, data streaming applications with large receiver sets.
        Our scheme is based upon a hierarchical clustering of the application-layer
        multicast peers and can support a number of different data delivery trees with
        desirable properties.We present extensive simulations of both our protocol and
        the Narada application-layer multicast protocol over Internet-like topologies.
        Our results show that for groups of size 32 or more, our protocol has lower link
        stress (by about 25\%), improved or similar end-to-end latencies and similar
        failure recovery properties. More importantly, it is able to achieve these
        results by using orders of magnitude lower control traffic.Finally, we present
        results from our wide-area testbed in which we experimented with 32-100 member
        groups distributed over 8 different sites. In our experiments, average group
        members established and maintained low-latency paths and incurred a maximum
        packet loss rate of less than 1\% as members randomly joined and left the
        multicast group. The average control overhead during our experiments was less
        than 1 Kbps for groups of size 100}, 
  www_section = {application layer multicast, hierarchy, overlay networks, P2P,
        scalability}, 
  isbn = {1-58113-570-X}, 
  doi = {10.1145/633025.633045}, 
  url = {http://portal.acm.org/citation.cfm?id=633045$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sigcomm02.pdf}, 
}
642636
@conference{642636,
  title = {Usability and privacy: a study of {Kazaa} {P2P} file-sharing}, 
  author = {Good, Nathaniel S. and Krekelberg, Aaron}, 
  booktitle = {CHI '03: Proceedings of the SIGCHI conference on Human factors in computing
        systems}, 
  organization = {ACM}, 
  year = {2003}, 
  address = {New York, NY, USA}, 
  pages = {137--144}, 
  publisher = {ACM}, 
  abstract = {P2P file sharing systems such as Gnutella, Freenet, and KaZaA, while
        primarily intended for sharing multimedia files, frequently allow other types of
        information to be shared. This raises serious concerns about the extent to which
        users may unknowingly be sharing private or personal information.In this paper,
        we report on a cognitive walkthrough and a laboratory user study of the KaZaA
        file sharing user interface. The majority of the users in our study were unable
        to tell what files they were sharing, and sometimes incorrectly assumed they were
        not sharing any files when in fact they were sharing all files on their hard
        drive. An analysis of the KaZaA network suggested that a large number of users
        appeared to be unwittingly sharing personal and private files, and that some
        users were indeed taking advantage of this and downloading files containing
        ostensibly private information}, 
  www_section = {file-sharing, P2P}, 
  isbn = {1-58113-630-7}, 
  doi = {10.1145/642611.642636}, 
  url = {http://portal.acm.org/citation.cfm?id=642611.642636$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HPL-2002-163.pdf}, 
}
646334
@proceedings{646334,
  title = {{IPTPS} '01: Revised Papers from the First International Workshop on Peer-to-Peer
        Systems},
  year = {2002},
  address = {London, UK},
  editor = {Peter Druschel and Kaashoek, M. Frans and Antony Rowstron},
  publisher = {Springer-Verlag},
  isbn = {3-540-44179-4},
  url = {http://portal.acm.org/citation.cfm?id=646334},
  www_note = {ERROR: missing www_section field},
}
651321
@inproceedings{651321,
  title = {{Venti}: A New Approach to Archival Storage},
  author = {Quinlan, Sean and Dorward, Sean},
  booktitle = {FAST '02: Proceedings of the Conference on File and Storage Technologies},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {89--101},
  publisher = {USENIX Association},
  abstract = {This paper describes a network storage system, called Venti, intended for
        archival data. In this system, a unique hash of a block's contents acts as the
        block identifier for read and write operations. This approach enforces a
        write-once policy, preventing accidental or malicious destruction of data. In
        addition, duplicate copies of a block can be coalesced, reducing the consumption
        of storage and simplifying the implementation of clients. Venti is a building
        block for constructing a variety of storage applications such as logical backup,
        physical backup, and snapshot file systems},
  www_section = {backup, file systems, network storage},
  isbn = {1-880446-03-0},
  url = {http://portal.acm.org/citation.cfm?id=651321},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/venti-fast.pdf},
}
664025
@inproceedings{664025,
  title = {{AMnet} 2.0: An Improved Architecture for Programmable Networks},
  author = {Thomas Fuhrmann and Till Harbaum and Marcus Schoeller and Martina Zitterbart},
  booktitle = {IWAN '02: Proceedings of the IFIP-TC6 4th International Working Conference
        on Active Networks},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {162--176},
  publisher = {Springer-Verlag},
  abstract = {AMnet 2.0 is an improved architecture for programmable networks that is based
        on the experiences from the previous implementation of AMnet. This paper gives an
        overview of the AMnet architecture and Linux-based implementation of this
        software router. It also discusses the differences to the previous version of
        AMnet. AMnet 2.0 complements application services with net-centric services in an
        integrated system that provides the fundamental building blocks both for an
        active node itself and the operation of a larger set of nodes, including code
        deployment decisions, service relocation, resource management},
  www_section = {programmable networks},
  isbn = {3-540-00223-5},
  doi = {10.1007/3-540-36199-5},
  url = {http://portal.acm.org/citation.cfm?id=664025},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann02architecture_0.pdf},
}
672869
@inproceedings{672869,
  title = {Balanced Distributed Search Trees Do Not Exist},
  author = {Kr{\"o}ll, Brigitte and Widmayer, Peter},
  booktitle = {WADS '95: Proceedings of the 4th International Workshop on Algorithms and
        Data Structures},
  organization = {Springer-Verlag},
  year = {1995},
  address = {London, UK},
  pages = {50--61},
  publisher = {Springer-Verlag},
  abstract = {This paper is a first step towards an understanding of the inherent
        limitations of distributed data structures. We propose a model of distributed
        search trees that is based on few natural assumptions. We prove that any class of
        trees within our model satisfies a lower bound of {$\Omega(\sqrt{m})$} on the worst
        case height of distributed search trees for m keys. That is, unlike in the single
        site case, balance in the sense that the tree height satisfies a logarithmic
        upper bound cannot be achieved. This is true although each node is allowed to
        have arbitrary degree (note that in this case, the height of a single site search
        tree is trivially bounded by one). By proposing a method that generates trees of
        height {$O(\sqrt{m})$}, we show the bound to be tight},
  isbn = {3-540-60220-8},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4081},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.34.4081.pdf},
  www_section = {Unsorted},
}
687810
@inproceedings{687810,
  title = {Security Considerations for Peer-to-Peer Distributed Hash Tables},
  author = {Emil Sit and Robert Morris},
  booktitle = {IPTPS '01: Revised Papers from the First International Workshop on
        Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {261--269},
  publisher = {Springer-Verlag},
  abstract = {Recent peer-to-peer research has focused on providing efficient hash lookup
        systems that can be used to build more complex systems. These systems have good
        properties when their algorithms are executed correctly but have not generally
        considered how to handle misbehaving nodes. This paper looks at what sorts of
        security problems are inherent in large peer-to-peer systems based on distributed
        hash lookup systems. We examine the types of problems that such systems might
        face, drawing examples from existing systems, and propose some design principles
        for detecting and preventing these problems},
  www_section = {distributed hash table, P2P},
  isbn = {3-540-44179-4},
  url = {http://portal.acm.org/citation.cfm?id=687810},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/173.pdf},
}
687814
@inproceedings{687814,
  title = {Erasure Coding Vs. Replication: A Quantitative Comparison},
  author = {Weatherspoon, Hakim and Kubiatowicz, John},
  booktitle = {IPTPS '01: Revised Papers from the First International Workshop on
        Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {328--338},
  publisher = {Springer-Verlag},
  abstract = {Peer-to-peer systems are positioned to take advantage of gains in network
        bandwidth, storage capacity, and computational resources to provide long-term
        durable storage infrastructures. In this paper, we quantitatively compare
        building a distributed storage infrastructure that is self-repairing and
        resilient to faults using either a replicated system or an erasure-resilient
        system. We show that systems employing erasure codes have mean time to failures
        many orders of magnitude higher than replicated systems with similar storage and
        bandwidth requirements. More importantly, erasure-resilient systems use an order
        of magnitude less bandwidth and storage to provide similar system durability as
        replicated systems},
  www_section = {distributed storage, erasure coding, P2P},
  isbn = {3-540-44179-4},
  doi = {10.1007/3-540-45748-8},
  url = {http://www.springerlink.com/content/e1kmcf729e6updgm/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Erasure\%20coding\%20vs.\%20replication.pdf},
}
697650
@inproceedings{697650,
  title = {{Pastry}: Scalable, Decentralized Object Location, and Routing for Large-Scale
        Peer-to-Peer Systems},
  author = {Antony Rowstron and Peter Druschel},
  booktitle = {Middleware '01: Proceedings of the IFIP/ACM International Conference on
        Distributed Systems Platforms Heidelberg},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {329--350},
  publisher = {Springer-Verlag},
  abstract = {This paper presents the design and evaluation of Pastry, a scalable,
        distributed object location and routing substrate for wide-area peer-to-peer
        applications. Pastry performs application-level routing and object location in a
        potentially very large overlay network of nodes connected via the Internet. It
        can be used to support a variety of peer-to-peer applications, including global
        data storage, data sharing, group communication and naming. Each node in the
        Pastry network has a unique identifier (nodeId). When presented with a message
        and a key, a Pastry node efficiently routes the message to the node with a nodeId
        that is numerically closest to the key, among all currently live Pastry nodes.
        Each Pastry node keeps track of its immediate neighbors in the nodeId space, and
        notifies applications of new node arrivals, node failures and recoveries. Pastry
        takes into account network locality; it seeks to minimize the distance messages
        travel, according to a scalar proximity metric like the number of IP routing
        hops. Pastry is completely decentralized, scalable, and self-organizing; it
        automatically adapts to the arrival, departure and failure of nodes. Experimental
        results obtained with a prototype implementation on an emulated network of up to
        100,000 nodes confirm Pastry's scalability and efficiency, its ability to
        self-organize and adapt to node failures, and its good network locality
        properties},
  www_section = {overlay networks, P2P},
  isbn = {3-540-42800-3},
  url = {http://portal.acm.org/citation.cfm?id=697650},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pastry.pdf},
}
713855
@inproceedings{713855,
  title = {Design Evolution of the {EROS} Single-Level Store},
  author = {Shapiro, Jonathan S. and Adams, Jonathan},
  booktitle = {ATEC '02: Proceedings of the General Track of the annual conference on
        USENIX Annual Technical Conference},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {59--72},
  publisher = {USENIX Association},
  abstract = {File systems have (at least) two undesirable characteristics: both the
        addressing model and the consistency semantics differ from those of memory,
        leading to a change in programming model at the storage boundary. Main memory is
        a single flat space of pages with a simple durability (persistence) model: all or
        nothing. File content durability is a complex function of implementation,
        caching, and timing. Memory is globally consistent. File systems offer no global
        consistency model. Following a crash recovery, individual files may be lost or
        damaged, or may be collectively inconsistent even though they are individually
        sound},
  www_section = {file systems},
  isbn = {1-880446-00-6},
  url = {http://portal.acm.org/citation.cfm?id=713855},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/storedesign2002.pdf},
}
714768
@inproceedings{714768,
  title = {Aspects of {AMnet} Signaling},
  author = {Speer, Anke and Marcus Schoeller and Thomas Fuhrmann and Martina Zitterbart},
  booktitle = {NETWORKING '02: Proceedings of the Second International IFIP-TC6 Networking
        Conference on Networking Technologies, Services, and Protocols; Performance of
        Computer and Communication Networks; and Mobile and Wireless Communications},
  organization = {Springer-Verlag},
  year = {2002},
  address = {London, UK},
  pages = {1214--1220},
  publisher = {Springer-Verlag},
  abstract = {AMnet provides a framework for flexible and rapid service creation. It is
        based on Programmable Networking technologies and uses active nodes (AMnodes)
        within the network for the provision of individual, application-specific
        services. To this end, these AMnodes execute service modules that are loadable
        on-demand and enhance the functionality of intermediate systems without the need
        of long global standardization processes. Placing application-dedicated
        functionality within the network requires a flexible signaling protocol to
        discover and announce as well as to establish and maintain the corresponding
        services. AMnet Signaling was developed for this purpose and will be presented in
        detail within this paper},
  www_section = {multicast, programmable networks},
  isbn = {3-540-43709-6},
  doi = {10.1007/3-540-47906-6},
  url = {http://www.springerlink.com/content/4j371710765jg14q/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/speer02networking.pdf},
}
715916
@inproceedings{715916,
  title = {{CPCMS}: A Configuration Management System Based on Cryptographic Names},
  author = {Shapiro, Jonathan S. and Vanderburgh, John},
  booktitle = {Proceedings of the FREENIX Track: 2002 USENIX Annual Technical Conference},
  organization = {USENIX Association},
  year = {2002},
  address = {Berkeley, CA, USA},
  pages = {207--220},
  publisher = {USENIX Association},
  abstract = {CPCMS, the Cryptographically Protected Configuration Management System is a
        new configuration management system that provides scalability, disconnected
        commits, and fine-grain access controls. It addresses the novel problems raised
        by modern open-source development practices, in which projects routinely span
        traditional organizational boundaries and can involve thousands of participants.
        CPCMS provides for simultaneous public and private lines of development, with
        post hoc "publication" of private branches},
  isbn = {1-880446-01-4},
  url = {http://portal.acm.org/citation.cfm?id=715916},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.3184.pdf},
  www_note = {ERROR: missing www_section field},
}
716407
@inproceedings{716407,
  title = {An Empirical Study of Delta Algorithms},
  author = {Hunt, James J. and Vo, Kiem-Phong and Tichy, Walter F.},
  booktitle = {ICSE '96: Proceedings of the SCM-6 Workshop on System Configuration
        Management},
  organization = {Springer-Verlag},
  year = {1996},
  address = {London, UK},
  pages = {49--66},
  publisher = {Springer-Verlag},
  abstract = {Delta algorithms compress data by encoding one file in terms of another. This
        type of compression is useful in a number of situations: storing multiple
        versions of data, distributing updates, storing backups, transmitting video
        sequences, and others. This paper studies the performance parameters of several
        delta algorithms, using a benchmark of over 1300 pairs of files taken from two
        successive releases of GNU software. Results indicate that modern delta
        compression algorithms based on Ziv-Lempel techniques significantly outperform
        diff, a popular but older delta compressor, in terms of compression ratio. The
        modern compressors also correlate better with the actual difference between
        files; one of them is even faster than diff in both compression and decompression
        speed},
  isbn = {3-540-61964-X},
  doi = {10.1007/BFb0023076},
  url = {http://www.springerlink.com/content/584k258285p18x4g/},
  www_section = {Unsorted},
}
747489
@inproceedings{747489,
  title = {Extremum Feedback for Very Large Multicast Groups},
  author = {J{\"o}rg Widmer and Thomas Fuhrmann},
  booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on
        Networked Group Communication},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {56--75},
  publisher = {Springer-Verlag},
  abstract = {In multicast communication, it is often required that feedback is received
        from a potentially very large group of responders while at the same time a
        feedback implosion needs to be prevented. To this end, a number of feedback
        control mechanisms have been proposed, which rely either on tree-based feedback
        aggregation or timer-based feedback suppression. Usually, these mechanisms assume
        that it is not necessary to discriminate between feedback from different
        receivers. However, for many applications this is not the case and feedback from
        receivers with certain response values is preferred (e.g., highest loss or
        largest delay)},
  www_section = {multicast},
  isbn = {3-540-42824-0},
  url = {http://portal.acm.org/citation.cfm?id=648089.747489},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Widmer2001g.pdf},
}
747491
@inproceedings{747491,
  title = {Application-Level Multicast Using {Content-Addressable Networks}},
  author = {Sylvia Paul Ratnasamy and Handley, Mark and Richard Karp and S Shenker},
  booktitle = {NGC '01: Proceedings of the Third International COST264 Workshop on
        Networked Group Communication},
  organization = {Springer-Verlag},
  year = {2001},
  address = {London, UK},
  pages = {14--29},
  publisher = {Springer-Verlag},
  abstract = {Most currently proposed solutions to application-level multicast organise the
        group members into an application-level mesh over which a Distance-Vector
        routing protocol, or a similar algorithm, is used to construct source-rooted
        distribution trees. The use of a global routing protocol limits the scalability
        of these systems. Other proposed solutions that scale to larger numbers of
        receivers do so by restricting the multicast service model to be single-sourced.
        In this paper, we propose an application-level multicast scheme capable of
        scaling to large group sizes without restricting the service model to a single
        source. Our scheme builds on recent work on Content-Addressable Networks (CANs).
        Extending the CAN framework to support multicast comes at trivial additional cost
        and, because of the structured nature of CAN topologies, obviates the need for a
        multicast routing algorithm. Given the deployment of a distributed infrastructure
        such as a CAN, we believe our CAN-based multicast scheme offers the dual
        advantages of simplicity and scalability},
  www_section = {CAN, mesh networks},
  isbn = {3-540-42824-0},
  doi = {10.1007/3-540-45546-9},
  url = {http://www.springerlink.com/content/ahdgfj8yj9exqe03/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can-mcast.pdf},
}
758535
@inproceedings{758535,
  title = {New Sequences of Linear Time Erasure Codes Approaching the Channel Capacity},
  author = {M. Amin Shokrollahi},
  booktitle = {AAECC-13: Proceedings of the 13th International Symposium on Applied
        Algebra, Algebraic Algorithms and Error-Correcting Codes},
  organization = {Springer-Verlag},
  year = {1999},
  address = {London, UK},
  pages = {65--76},
  publisher = {Springer-Verlag},
  abstract = {We will introduce a new class of erasure codes built from irregular bipartite
        graphs that have linear time encoding and decoding algorithms and can transmit
        over an erasure channel at rates arbitrarily close to the channel capacity. We
        also show that these codes are close to optimal with respect to the trade-off
        between the proximity to the channel capacity and the running time of the
        recovery algorithm},
  www_section = {coding theory, irregular bipartite graphs, recovery algorithm},
  isbn = {3-540-66723-7},
  url = {http://portal.acm.org/citation.cfm?id=758535},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/new_sequences_of_linear_time_erasure_cod_64778.pdf},
}
766661
@article{766661,
  title = {Self-Organized Public-Key Management for Mobile Ad Hoc Networks},
  author = {{\v C}apkun, Srdjan and Butty{\'a}n, Levente and Hubaux, Jean-Pierre},
  journal = {IEEE Transactions on Mobile Computing},
  volume = {2},
  number = {1},
  year = {2003},
  address = {Piscataway, NJ, USA},
  pages = {52--64},
  publisher = {IEEE Educational Activities Department},
  abstract = {In contrast with conventional networks, mobile ad hoc networks usually do not
        provide online access to trusted authorities or to centralized servers, and they
        exhibit frequent partitioning due to link and node failures and to node mobility.
        For these reasons, traditional security solutions that require online trusted
        authorities or certificate repositories are not well-suited for securing ad hoc
        networks. In this paper, we propose a fully self-organized public-key management
        system that allows users to generate their public-private key pairs, to issue
        certificates, and to perform authentication regardless of the network partitions
        and without any centralized services. Furthermore, our approach does not require
        any trusted authority, not even in the system initialization phase},
  www_section = {ad-hoc networks, key authentication, PGP, public key cryptography,
        self-organization},
  issn = {1536-1233},
  doi = {10.1109/TMC.2003.1195151},
  url = {http://portal.acm.org/citation.cfm?id=766655.766661},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.1545.pdf},
}
776703
@article{776703,
  title = {Security Performance},
  author = {Menasc{\'e}, Daniel},
  journal = {IEEE Internet Computing},
  volume = {7},
  number = {3},
  year = {2003},
  address = {Piscataway, NJ, USA},
  pages = {84--87},
  publisher = {IEEE Educational Activities Department},
  abstract = {Several protocols and mechanisms aim to enforce the various dimensions of
        security in applications ranging from email to e-commerce transactions. Adding
        such mechanisms and procedures to applications and systems does not come cheaply,
        however, as they impose security trade-offs in the areas of performance and
        scalability},
  www_section = {security policy, trade-off},
  issn = {1089-7801},
  doi = {10.1109/MIC.2003.1200305},
  url = {http://portal.acm.org/citation.cfm?id=776703},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE-IC-SecurityPerformance-May-2003.pdf},
}
778418
@inproceedings{778418,
  title = {A charging and rewarding scheme for packet forwarding in multi-hop cellular
        networks},
  author = {Ben Salem, Naouel and Levente Butty{\'a}n and Jean-Pierre Hubaux and Jakobsson,
        Markus},
  booktitle = {MobiHoc '03: Proceedings of the 4th ACM international symposium on Mobile ad
        hoc networking \& computing},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {13--24},
  publisher = {ACM},
  abstract = {In multi-hop cellular networks, data packets have to be relayed hop by hop
        from a given mobile station to a base station and vice-versa. This means that the
        mobile stations must accept to forward information for the benefit of other
        stations. In this paper, we propose an incentive mechanism that is based on a
        charging/rewarding scheme and that makes collaboration rational for selfish
        nodes. We base our solution on symmetric cryptography to cope with the limited
        resources of the mobile stations. We provide a set of protocols and study their
        robustness with respect to various attacks. By leveraging on the relative
        stability of the routes, our solution leads to a very moderate overhead},
  www_section = {ad-hoc networks, charging, cooperation, hybrid cellular networks,
        multi-hop networks, packet forwarding},
  isbn = {1-58113-684-6},
  doi = {10.1145/778415.778418},
  url = {http://portal.acm.org/citation.cfm?id=778418},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BenSalemBHJ03mobihoc.pdf},
}
78977
@article{78977,
  title = {Skip lists: a probabilistic alternative to balanced trees},
  author = {Pugh, William},
  journal = {Communications of the ACM},
  volume = {33},
  number = {6},
  year = {1990},
  address = {New York, NY, USA},
  pages = {668--676},
  publisher = {ACM},
  abstract = {Skip lists are data structures that use probabilistic balancing rather than
        strictly enforced balancing. As a result, the algorithms for insertion and
        deletion in skip lists are much simpler and significantly faster than equivalent
        algorithms for balanced trees},
  www_section = {data structures, search},
  issn = {0001-0782},
  doi = {10.1145/78973.78977},
  url = {http://portal.acm.org/citation.cfm?id=78977},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.85.9211.pdf},
}
792432
@inproceedings{792432,
  title = {Supporting Peer-to-Peer Computing with {FlexiNet}},
  author = {Thomas Fuhrmann},
  booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster
        Computing and the Grid},
  organization = {IEEE Computer Society},
  year = {2003},
  address = {Washington, DC, USA},
  pages = {0--390},
  publisher = {IEEE Computer Society},
  abstract = {Formation of suitable overlay-network topologies that are able to reflect the
        structure of the underlying network-infrastructure, has rarely been addressed by
        peer-to-peer applications so far. Often, peer-to-peer protocols restrain to purely
        random formation of their overlay-network. This leads to a far from optimal
        performance of such peer-to-peer networks and ruthlessly wastes network
        resources. In this paper, we describe a simple mechanism that uses programmable
        network technologies to improve the topology formation process of unstructured
        peer-to-peer networks. Being a network service, our mechanism does not require any
        modification of existing applications or computing systems. By that, it assists
        network operators with improving the performance of their network and relieves
        programmers from the burden of designing and implementing topology-aware
        peer-to-peer protocols. Although we use the well-known Gnutella protocol to describe
        the mechanism of our proposed service, it applies to all kinds of unstructured
        global peer-to-peer computing applications},
  www_section = {overlay networks, programmable networks, topology matching},
  isbn = {0-7695-1919-9},
  url = {http://portal.acm.org/citation.cfm?id=791231.792432},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03supportingP2P.pdf},
}
792493
@inproceedings{792493,
  title = {An Overlay-Network Approach for Distributed Access to {SRS}},
  author = {Thomas Fuhrmann and Andrea Schafferhans and Etzold, Thure},
  booktitle = {CCGRID '03: Proceedings of the 3rd International Symposium on Cluster
        Computing and the Grid},
  organization = {IEEE Computer Society},
  year = {2003},
  address = {Washington, DC, USA},
  pages = {0--601},
  publisher = {IEEE Computer Society},
  abstract = {SRS is a widely used system for integrating biological databases. Currently,
        SRS relies only on locally provided copies of these databases. In this paper we
        propose a mechanism that also allows the seamless integration of remote databases.
        To this end, our proposed mechanism splits the existing SRS functionality into two
        components and adds a third component that enables us to employ
        peer-to-peer computing techniques to create optimized overlay-networks within which
        database queries can efficiently be routed. As an additional benefit, this
        mechanism also reduces the administration effort that would be needed with a
        conventional approach using replicated databases},
  www_section = {overlay networks, P2P, SRS},
  isbn = {0-7695-1919-9},
  url = {http://portal.acm.org/citation.cfm?id=792493},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03overlaySRS.pdf},
}
820485
@inproceedings{820485,
  title = {Energy-Efficient Communication Protocol for Wireless Microsensor Networks},
  author = {Heinzelman, Wendi Rabiner and Chandrakasan, Anantha and Hari Balakrishnan},
  booktitle = {HICSS '00: Proceedings of the 33rd Hawaii International Conference on System
        Sciences-Volume 8},
  organization = {IEEE Computer Society},
  year = {2000},
  address = {Washington, DC, USA},
  pages = {0--8020},
  publisher = {IEEE Computer Society},
  abstract = {Wireless distributed micro-sensor systems will enable the reliable monitoring
        of a variety of environments for both civil and military applications. In this
        paper, we look at communication protocols, which can have significant impact on
        the overall energy dissipation of these networks. Based on our findings that the
        conventional protocols of direct transmission, minimum-transmission-energy,
        multihop routing, and static clustering may not be optimal for sensor networks,
        we propose LEACH (Low-Energy Adaptive Clustering Hierarchy), a clustering-based
        protocol that utilizes randomized rotation of local cluster base stations
        (cluster-heads) to evenly distribute the energy load among the sensors in the
        network. LEACH uses localized coordination to enable scalability and robustness
        for dynamic networks, and incorporates data fusion into the routing protocol to
        reduce the amount of information that must be transmitted to the base station.
        Simulations show that LEACH can achieve as much as a factor of 8 reduction in
        energy dissipation compared with conventional routing protocols. In addition,
        LEACH is able to distribute energy dissipation evenly throughout the sensors,
        doubling the useful system lifetime for the networks we simulated},
  www_section = {Low-Energy Adaptive Clustering Hierarchy, mobile Ad-hoc networks, routing,
        wireless sensor network},
  isbn = {0-7695-0493-0},
  url = {http://portal.acm.org/citation.cfm?id=820485},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.90.8499.pdf},
}
844156
@article{844156,
  title = {Secure routing for structured peer-to-peer overlay networks},
  author = {Miguel Castro and Peter Druschel and Ganesh, Ayalvadi and Antony Rowstron and
        Dan S. Wallach},
  journal = {SIGOPS Operating Systems Review},
  volume = {36},
  number = {SI},
  year = {2002},
  address = {New York, NY, USA},
  pages = {299--314},
  publisher = {ACM},
  abstract = {Structured peer-to-peer overlay networks provide a substrate for the
        construction of large-scale, decentralized applications, including distributed
        storage, group communication, and content distribution. These overlays are highly
        resilient; they can route messages correctly even when a large fraction of the
        nodes crash or the network partitions. But current overlays are not secure; even
        a small fraction of malicious nodes can prevent correct message delivery
        throughout the overlay. This problem is particularly serious in open peer-to-peer
        systems, where many diverse, autonomous parties without preexisting trust
        relationships wish to pool their resources. This paper studies attacks aimed at
        preventing correct message delivery in structured peer-to-peer overlays and
        presents defenses to these attacks. We describe and evaluate techniques that
        allow nodes to join the overlay, to maintain routing state, and to forward
        messages securely in the presence of malicious nodes},
  www_section = {P2P, resilient overlay network},
  issn = {0163-5980},
  doi = {10.1145/844128.844156},
  url = {http://portal.acm.org/citation.cfm?id=844156},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/osdi2002.pdf},
}
863960
@inproceedings{863960,
  title = {A delay-tolerant network architecture for challenged internets},
  author = {Fall, Kevin},
  booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications,
        technologies, architectures, and protocols for computer communications},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {27--34},
  publisher = {ACM},
  abstract = {The highly successful architecture and protocols of today's Internet may
        operate poorly in environments characterized by very long delay paths and
        frequent network partitions. These problems are exacerbated by end nodes with
        limited power or memory resources. Often deployed in mobile and extreme
        environments lacking continuous connectivity, many such networks have their own
        specialized protocols, and do not utilize IP. To achieve interoperability between
        them, we propose a network architecture and application interface structured
        around optionally-reliable asynchronous message forwarding, with limited
        expectations of end-to-end connectivity and node resources. The architecture
        operates as an overlay above the transport layers of the networks it
        interconnects, and provides key services such as in-network data storage and
        retransmission, interoperable naming, authenticated forwarding and a
        coarse-grained class of service},
  isbn = {1-58113-735-4},
  doi = {10.1145/863955.863960},
  url = {http://portal.acm.org/citation.cfm?id=863960$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IRB-TR-03-003.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
864000
@inproceedings{864000,
  title = {Making {Gnutella}-like {P2P} systems scalable},
  author = {Chawathe, Yatin and Breslau, Lee and Lanham, Nick and S Shenker},
  booktitle = {SIGCOMM '03: Proceedings of the 2003 conference on Applications,
        technologies, architectures, and protocols for computer communications},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {407--418},
  publisher = {ACM},
  abstract = {Napster pioneered the idea of peer-to-peer file sharing, and supported it
        with a centralized file search facility. Subsequent P2P systems like Gnutella
        adopted decentralized search algorithms. However, Gnutella's notoriously poor
        scaling led some to propose distributed hash table solutions to the wide-area
        file search problem. Contrary to that trend, we advocate retaining Gnutella's
        simplicity while proposing new mechanisms that greatly improve its scalability.
        Building upon prior research [1, 12, 22], we propose several modifications to
        Gnutella's design that dynamically adapt the overlay topology and the search
        algorithms in order to accommodate the natural heterogeneity present in most
        peer-to-peer systems. We test our design through simulations and the results show
        three to five orders of magnitude improvement in total system capacity. We also
        report on a prototype implementation and its deployment on a testbed},
  www_section = {distributed hash table, Gnutella, P2P},
  isbn = {1-58113-735-4},
  doi = {10.1145/863955.864000},
  url = {http://portal.acm.org/citation.cfm?id=864000$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.5444.pdf},
}
873217
@inproceedings{873217,
  title = {Improving Data Availability through Dynamic Model-Driven Replication in Large
        {Peer-to-Peer} Communities},
  author = {Ranganathan, Kavitha and Iamnitchi, Adriana and Foster, Ian},
  booktitle = {CCGRID '02: Proceedings of the 2nd IEEE/ACM International Symposium on
        Cluster Computing and the Grid},
  organization = {IEEE Computer Society},
  year = {2002},
  address = {Washington, DC, USA},
  pages = {0--376},
  publisher = {IEEE Computer Society},
  abstract = {Efficient data sharing in global peer-to-peer systems is complicated by
        erratic node failure, unreliable networkconnectivity and limited
        bandwidth.Replicating data onmultiple nodes can improve availability and response
        time.Yet determining when and where to replicate data in orderto meet performance
        goals in large-scale systems withmany users and files, dynamic network
        characteristics, and changing user behavior is difficult.We propose anapproach in
        which peers create replicas automatically in a decentralized fashion, as required
        to meet availabilitygoals.The aim of our framework is to maintain a
        thresholdlevel of availability at all times.We identify a set of factors that
        hinder data availabilityand propose a model that decides when more replication
        isnecessary.We evaluate the accuracy and performance ofthe proposed model using
        simulations.Our preliminaryresults show that the model is effective in predicting
        therequired number of replicas in the system},
  www_section = {data sharing, model-driven, P2P},
  isbn = {0-7695-1582-7},
  url = {http://portal.acm.org/citation.cfm?id=873217$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.909.pdf},
}
896561
@booklet{896561,
  title = {On the Scaling of Feedback Algorithms for Very Large Multicast Groups},
  author = {Thomas Fuhrmann},
  year = {2000},
  publisher = {University of Mannheim},
  abstract = {Feedback from multicast group members is vital for many multicast protocols.
        In order to avoid feedback implosion in very large groups feedback algorithms
        with well behaved scaling-properties must be chosen. In this paper we analyse the
        performance of three typical feedback algorithms described in the literature.
        Apart from the basic trade-off between feedback latency and response duplicates
        we especially focus on the algorithms' sensitivity to the quality of the group
        size estimation. Based on this analysis we give recommendations for the choice of
        well behaved feedback algorithms that are suitable for very large groups},
  url = {http://portal.acm.org/citation.cfm?id=896561$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fuhrmann2001a.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
898770
@booklet{898770,
  title = {{Libckpt}: Transparent Checkpointing under {Unix}},
  author = {James S. Plank and Beck, Micah and Kingsley, Gerry and Li, Kai},
  year = {1994},
  address = {Knoxville, TN, USA},
  publisher = {University of Tennessee},
  abstract = {Checkpointing is a simple technique for rollback recovery: the state of an
        executing program is periodically saved to a disk file from which it can be
        recovered after a failure. While recent research has developed a collection of
        powerful techniques for minimizing the overhead of writing checkpoint files,
        checkpointing remains unavailable to most application developers. In this paper
        we describe libckpt, a portable checkpointing tool for Unix that implements all
        applicable performance optimizations which are reported in the literature. While
        libckpt can be used in a mode which is almost totally transparent to the
        programmer, it also supports the incorporation of user directives into the
        creation of checkpoints. This user-directed checkpointing is an innovation which
        is unique to our work. 1 Introduction Consider a programmer who has developed an
        application which will take a long time to execute, say five days. Two days into
        the computation, the processor on which the application is},
  www_section = {checkpointing, performance analysis},
  url = {http://portal.acm.org/citation.cfm?id=898770$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.55.257.pdf},
}
937250
@phdthesis{937250,
  title = {A scalable content-addressable network},
  author = {Ratnasamy, Sylvia Paul},
  school = {University of California, Berkeley},
  year = {2002},
  type = {{PhD} thesis},
  note = {Chair-Shenker, Scott and Chair-Stoica, Ion},
  www_section = {CAN, distributed hash table},
  url = {http://www.icir.org/sylvia/thesis.ps},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/can.pdf},
}
939011
@inproceedings{939011,
  title = {Ad hoc-{VCG}: a truthful and cost-efficient routing protocol for mobile ad hoc
        networks with selfish agents},
  author = {Anderegg, Luzi and Eidenbenz, Stephan},
  booktitle = {MobiCom '03: Proceedings of the 9th annual international conference on
        Mobile computing and networking},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {245--259},
  publisher = {ACM},
  abstract = {We introduce a game-theoretic setting for routing in a mobile ad hoc network
        that consists of greedy, selfish agents who accept payments for forwarding data
        for other agents if the payments cover their individual costs incurred by
        forwarding data. In this setting, we propose Ad hoc-VCG, a reactive routing
        protocol that achieves the design objectives of truthfulness (i.e., it is in the
        agents' best interest to reveal their true costs for forwarding data) and
        cost-efficiency (i.e., it guarantees that routing is done along the most
        cost-efficient path) in a game-theoretic sense by paying to the intermediate
        nodes a premium over their actual costs for forwarding data packets. We show that
        the total overpayment (i.e., the sum of all premiums paid) is relatively small by
        giving a theoretical upper bound and by providing experimental evidence. Our
        routing protocol implements a variation of the well-known mechanism by Vickrey,
        Clarke, and Groves in a mobile network setting. Finally, we analyze a very
        natural routing protocol that is an adaptation of the Packet Purse Model [8] with
        auctions in our setting and show that, unfortunately, it does not achieve
        cost-efficiency or truthfulness},
  www_section = {ad-hoc networks, energy efficiency, game theory, mechanism design,
        routing, selfish agents, VCG mechanism},
  isbn = {1-58113-753-2},
  doi = {10.1145/938985.939011},
  url = {http://portal.acm.org/citation.cfm?id=939011$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.103.7483.pdf},
}
939859
@article{939859,
  title     = {Wireless Community Networks},
  author    = {Jain, Saurabh and Agrawal, Dharma P.},
  journal   = {Computer},
  volume    = {36},
  number    = {8},
  year      = {2003},
  address   = {Los Alamitos, CA, USA},
  pages     = {90--92},
  publisher = {IEEE Computer Society Press},
  issn      = {0018-9162},
  doi       = {10.1109/MC.2003.1220588},
  url       = {http://portal.acm.org/citation.cfm?id=939824.939859$\#$},
%%%%% ERROR: Missing field
% www_section = {?????},
}
942421
@article{942421,
  title = {Stimulating cooperation in self-organizing mobile ad hoc networks},
  author = {Levente Butty{\'a}n and Jean-Pierre Hubaux},
  journal = {Mobile Networks and Applications},
  volume = {8},
  number = {5},
  year = {2003},
  address = {Hingham, MA, USA},
  pages = {579--592},
  publisher = {Kluwer Academic Publishers},
  abstract = {In military and rescue applications of mobile ad hoc networks, all the nodes
        belong to the same authority; therefore, they are motivated to cooperate in order
        to support the basic functions of the network. In this paper, we consider the
        case when each node is its own authority and tries to maximize the benefits it
        gets from the network. More precisely, we assume that the nodes are not willing
        to forward packets for the benefit of other nodes. This problem may arise in
        civilian applications of mobile ad hoc networks. In order to stimulate the nodes
        for packet forwarding, we propose a simple mechanism based on a counter in each
        node. We study the behavior of the proposed mechanism analytically and by means
        of simulations, and detail the way in which it could be protected against
        misuse},
  www_section = {ad-hoc networks, cooperation, self-organization},
  issn = {1383-469X},
  doi = {10.1023/A:1025146013151},
  url = {http://portal.acm.org/citation.cfm?id=942421$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ButtyanH03monet.pdf},
}
948119
@inproceedings{948119,
  title = {Establishing pairwise keys in distributed sensor networks},
  author = {Liu, Donggang and Peng Ning},
  booktitle = {CCS '03: Proceedings of the 10th ACM conference on Computer and
        communications security},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {52--61},
  publisher = {ACM},
  abstract = {Pairwise key establishment is a fundamental security service in sensor
        networks; it enables sensor nodes to communicate securely with each other using
        cryptographic techniques. However, due to the resource constraints on sensors, it
        is infeasible to use traditional key management techniques such as public key
        cryptography and key distribution center (KDC). To facilitate the study of novel
        pairwise key predistribution techniques, this paper presents a general framework
        for establishing pairwise keys between sensors on the basis of a polynomial-based
        key predistribution protocol [2]. This paper then presents two efficient
        instantiations of the general framework: a random subset assignment key
        predistribution scheme and a grid-based key predistribution scheme. The analysis
        in this paper indicates that these two schemes have a number of nice properties,
        including high probability (or guarantee) to establish pairwise keys, tolerance
        of node captures, and low communication overhead. Finally, this paper presents a
        technique to reduce the computation at sensors required by these schemes},
  www_section = {key management, probabilistic key sharing, sensor networks},
  isbn = {1-58113-738-9},
  doi = {10.1145/948109.948119},
  url = {http://portal.acm.org/citation.cfm?id=948119$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs03-SNKeyMan.pdf},
}
958494
@inproceedings{958494,
  title = {Taming the underlying challenges of reliable multihop routing in sensor
        networks},
  author = {Woo, Alec and Tong, Terence and Culler, David},
  booktitle = {SenSys '03: Proceedings of the 1st international conference on Embedded
        networked sensor systems},
  organization = {ACM},
  year = {2003},
  address = {New York, NY, USA},
  pages = {14--27},
  publisher = {ACM},
  abstract = {The dynamic and lossy nature of wireless communication poses major challenges
        to reliable, self-organizing multihop networks. These non-ideal characteristics
        are more problematic with the primitive, low-power radio transceivers found in
        sensor networks, and raise new issues that routing protocols must address. Link
        connectivity statistics should be captured dynamically through an efficient yet
        adaptive link estimator and routing decisions should exploit such connectivity
        statistics to achieve reliability. Link status and routing information must be
        maintained in a neighborhood table with constant space regardless of cell
        density. We study and evaluate link estimator, neighborhood table management, and
        reliable routing protocol techniques. We focus on a many-to-one, periodic data
        collection workload. We narrow the design space through evaluations on
        large-scale, high-level simulations to 50-node, in-depth empirical experiments.
        The most effective solution uses a simple time averaged EWMA estimator, frequency
        based table management, and cost-based routing},
  www_section = {link estimation, multi-hop networks, neighborhood management, reliability,
        sensor networks},
  isbn = {1-58113-707-9},
  doi = {10.1145/958491.958494},
  url = {http://portal.acm.org/citation.cfm?id=958494$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p14-woo.pdf},
}
987233
@article{987233,
  title = {Internet indirection infrastructure},
  author = {Stoica, Ion and Adkins, Daniel and Zhuang, Shelley and Shenker, Scott and
        Surana, Sonesh},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {12},
  number = {2},
  year = {2004},
  address = {Piscataway, NJ, USA},
  pages = {205--218},
  publisher = {IEEE Press},
  abstract = {Attempts to generalize the Internet's point-to-point communication
        abstraction to provide services like multicast, anycast, and mobility have faced
        challenging technical problems and deployment barriers. To ease the deployment of
        such services, this paper proposes a general, overlay-based Internet Indirection
        Infrastructure (i3) that offers a rendezvous-based communication abstraction.
        Instead of explicitly sending a packet to a destination, each packet is
        associated with an identifier; this identifier is then used by the receiver to
        obtain delivery of the packet. This level of indirection decouples the act of
        sending from the act of receiving, and allows i3 to efficiently support a wide
        variety of fundamental communication services. To demonstrate the feasibility of
        this approach, we have designed and built a prototype based on the Chord lookup
        protocol},
  www_section = {indirection, mobility, multicast, network infrastructure, service
        composition},
  issn = {1063-6692},
  doi = {10.1109/TNET.2004.826279},
  url = {http://portal.acm.org/citation.cfm?id=987233$\#$},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/i3.pdf},
}
9999
@booklet{9999,
  title = {The Gnutella Protocol Specification v0.4},
  author = {TODO},
  year = {2001},
  abstract = {A brief description of the gnutella protocol},
  url = {http://www9.limewire.com/developer/gnutella_protocol_0.4.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Aad06packetcoding
@booklet{Aad06packetcoding,
  title       = {Packet coding for strong anonymity in ad hoc networks},
  author      = {Imad Aad and Claude Castelluccia and Jean-Pierre Hubaux},
  year        = {2006},
  abstract    = {Several techniques to improve anonymity have been proposed in the literature.
        They rely basically on multicast or on onion routing to thwart global attackers
        or local attackers respectively. None of the techniques provide a combined
        solution due to the incompatibility between the two components, as we show in
        this paper. We propose novel packet coding techniques that make the combination
        possible, thus integrating the advantages in a more complete and robust
        solution},
  www_section = {anonymity, onion routing, robustness},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.88.2407},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.88.2407_0.pdf},
}
Aberer03p-grid:a
@booklet{Aberer03p-grid:a,
  title = {{P-Grid}: A Self-organizing Structured {P2P} System},
  author = {Karl Aberer and Philippe Cudre-Mauroux and Anwitaman Datta and Zoran Despotovic
        and Manfred Hauswirth and Magdalena Punceva and Roman Schmidt},
  year = {2003},
  abstract = {this paper was supported in part by the National Competence Center in
        Research on Mobile Information and Communication Systems (NCCR-MICS), a center
        supported by the Swiss National Science Foundation under grant number 5005-67322
        and by SNSF grant 2100064994, "Peer-to-Peer Information Systems." messages. From
        the responses it (randomly) selects certain peers to which direct network links
        are established},
  www_section = {P2P},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.5649},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.5649.pdf},
}
Aberer04multifacetedsimultaneous
@inproceedings{Aberer04multifacetedsimultaneous,
  title = {Multifaceted Simultaneous Load Balancing in {DHT}-based {P2P} systems: A new game
        with old balls and bins},
  author = {Karl Aberer and Anwitaman Datta and Manfred Hauswirth},
  booktitle = {Self-* Properties in Complex Information Systems, {\textquotedblleft}Hot
        Topics{\textquotedblright} series, LNCS},
  organization = {Springer},
  year = {2004},
  publisher = {Springer},
  abstract = {In this paper we present and evaluate uncoordinated on-line algorithms for
        simultaneous storage and replication load-balancing in DHT-based peer-to-peer
        systems. We compare our approach with the classical balls into bins model, and
        point out the similarities but also the differences which call for new
        loadbalancing mechanisms specifically targeted at P2P systems. Some of the
        peculiarities of P2P systems, which make our problem even more challenging are
        that both the network membership and the data indexed in the network is dynamic,
        there is neither global coordination nor global information to rely on, and the
        load-balancing mechanism ideally should not compromise the structural properties
        and thus the search efficiency of the DHT, while preserving the semantic
        information of the data (e.g., lexicographic ordering to enable range searches)},
  www_section = {distributed hash table, P2P, storage},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.3746},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/self-star-load-balance.pdf},
}
Acedanski05howgood
@inproceedings{Acedanski05howgood,
  title = {How good is random linear coding based distributed networked storage?},
  author = {Szymon Aceda{\'n}ski and Supratim Deb and Muriel M{\'e}dard and Ralf Koetter},
  booktitle = {NetCod'05--First Workshop on Network Coding, Theory, and Applications},
  organization = {Citeseer},
  year = {2005},
  month = apr,
  address = {Riva del Garda, Italy},
  publisher = {Citeseer},
  abstract = {We consider the problem of storing a large file or multiple large files in a
        distributed manner over a network. In the framework we consider, there are
        multiple storage locations, each of which only have very limited storage space
        for each file. Each storage location chooses a part (or a coded version of the
        parts) of the file without the knowledge of what is stored in the other
        locations. We want a file-downloader to connect to as few storage locations as
        possible and retrieve the entire file. We compare the performance of three
        strategies: uncoded storage, traditional erasure coding based storage, random
        linear coding based storage motivated by network coding. We demonstrate that, in
        principle, a traditional erasure coding based storage (eg: Reed-Solomon Codes)
        strategy can almost do as well as one can ask for with appropriate choice of
        parameters. However, the cost is a large amount of additional storage space
        required at the centralized server before distribution among multiple locations.
        The random linear coding based strategy performs as well without suffering from
        any such disadvantage. Further, with a probability close to one, the minimum
        number of storage location a downloader needs to connect to (for reconstructing
        the entire file), can be very close to the case where there is complete
        coordination between the storage locations and the downloader. We also argue that
        an uncoded strategy performs poorly},
  www_section = {distributed networked storage, limited storage, linear coding, multiple
        storage locations},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetCod\%2705\%20-\%20Random\%20linear\%20coding\%20based\%20distributed\%20networked\%20storage.pdf},
}
Acquisti03onthe
@inproceedings{Acquisti03onthe,
  title = {On the Economics of Anonymity},
  author = {Alessandro Acquisti and Roger Dingledine and Paul Syverson},
  booktitle = {Financial Cryptography},
  series = {Lecture Notes in Computer Science},
  volume = {2742},
  publisher = {Springer-Verlag},
  year = {2003},
  pages = {84--102},
  abstract = {Decentralized anonymity infrastructures are still not in wide use today.
        While there are technical barriers to a secure robust design, our lack of
        understanding of the incentives to participate in such systems remains a major
        roadblock. Here we explore some reasons why anonymity systems are particularly
        hard to deploy, enumerate the incentives to participate either as senders or also
        as nodes, and build a general model to describe the effects of these incentives.
        We then describe and justify some simplifying assumptions to make the model
        manageable, and compare optimal strategies for participants based on a variety of
        scenarios},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.13.5636\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.5636.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Acquisti04privacyin
@booklet{Acquisti04privacyin,
  title    = {Privacy in Electronic Commerce and the Economics of Immediate Gratification},
  author   = {Alessandro Acquisti},
  year     = {2004},
  abstract = {Dichotomies between privacy attitudes and behavior have been noted in the
        literature but not yet fully explained. We apply lessons from the research on
        behavioral economics to understand the individual decision making process with
        respect to privacy in electronic commerce. We show that it is unrealistic to
        expect individual rationality in this context. Models of self-control problems
        and immediate gratification offer more realistic descriptions of the decision
        process and are more consistent with currently available data. In particular, we
        show why individuals who may genuinely want to protect their privacy might not do
        so because of psychological distortions well documented in the behavioral
        literature; we show that these distortions may affect not only
        {\textquoteleft}na{\"\i}ve' individuals but also {\textquoteleft}sophisticated'
        ones; and we prove that this may occur also when individuals perceive the risks
        from not protecting their privacy as significant},
  url      = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.3760\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Adya:2002:FFA:844128.844130
@article{Adya:2002:FFA:844128.844130,
  title = {{FARSITE}: Federated, Available, and Reliable Storage for an Incompletely Trusted
        Environment},
  author = {Adya, Atul and Bolosky, William J. and Castro, Miguel and Cermak, Gerald and
        Chaiken, Ronnie and Douceur, John R. and Howell, Jon and Lorch, Jacob R. and
        Theimer, Marvin and Wattenhofer, Roger},
  journal = {ACM SIGOPS Operating Systems Review},
  volume = {36},
  year = {2002},
  month = dec,
  address = {New York, NY, USA},
  pages = {1--14},
  publisher = {ACM},
  abstract = {Farsite is a secure, scalable file system that logically functions as a
        centralized file server but is physically distributed among a set of untrusted
        computers. Farsite provides file availability and reliability through randomized
        replicated storage; it ensures the secrecy of file contents with cryptographic
        techniques; it maintains the integrity of file and directory data with a
        Byzantine-fault-tolerant protocol; it is designed to be scalable by using a
        distributed hint mechanism and delegation certificates for pathname translations;
        and it achieves good performance by locally caching file data, lazily propagating
        file updates, and varying the duration and granularity of content leases. We
        report on the design of Farsite and the lessons we have learned by implementing
        much of that design},
  www_section = {centralized file server, farsite, file system, randomized replicaed
        storage},
  issn = {0163-5980},
  doi = {10.1145/844128.844130},
  url = {http://doi.acm.org/10.1145/844128.844130},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOPS\%20-\%20FARSITE.pdf},
}
Aekaterinidis2006PastryStrings
@inproceedings{Aekaterinidis2006PastryStrings,
  title = {{PastryStrings}: A Comprehensive Content-Based Publish/Subscribe {DHT} Network},
  author = {Aekaterinidis, Ioannis and Triantafillou, Peter},
  booktitle = {Proceedings of the 26th IEEE International Conference on Distributed
        Computing Systems},
  organization = {IEEE Computer Society},
  year = {2006},
  address = {Washington, DC, USA},
  pages = {0--23},
  publisher = {IEEE Computer Society},
  series = {ICDCS '06},
  isbn = {0-7695-2540-7},
  doi = {10.1109/ICDCS.2006.63},
  url = {http://dx.doi.org/10.1109/ICDCS.2006.63},
%%%%% ERROR: Missing field
% www_section = {?????},
}
AhmedBoutaba2006DistributedPatternMatching
@inproceedings{AhmedBoutaba2006DistributedPatternMatching,
  title = {Distributed Pattern Matching: A Key to Flexible and Efficient {P2P} Search},
  author = {Ahmed, R. and Boutaba, R.},
  booktitle = {2006 IEEE/IFIP Network Operations and Management Symposium NOMS 2006},
  organization = {IEEE},
  year = {2006},
  pages = {198--208},
  publisher = {IEEE},
  abstract = {Flexibility and efficiency are the prime requirements for any P2P search
        mechanism. Existing P2P systems do not seem to provide satisfactory solution for
        achieving these two conflicting goals. Unstructured search protocols (as adopted
        in Gnutella and FastTrack), provide search flexibility but exhibit poor
        performance characteristics. Structured search techniques (mostly distributed
        hash table (DHT)-based), on the other hand, can efficiently route queries to
        target peers but support exact-match queries only. In this paper we present a
        novel P2P system, called distributed pattern matching system (DPMS), for enabling
        flexible and efficient search. Distributed pattern matching can be used to solve
        problems like wildcard searching (for file-sharing P2P systems), partial service
        description matching (for service discovery systems) etc. DPMS uses a hierarchy
        of indexing peers for disseminating advertised patterns. Patterns are aggregated
        and replicated at each level along the hierarchy. Replication improves
        availability and resilience to peer failure, and aggregation reduces storage
        overhead. An advertised pattern can be discovered using any subset of its 1-bits;
        this allows inexact matching and queries in conjunctive normal form. Search
        complexity (i.e., the number of peers to be probed) in DPMS is O (log N + zetalog
        N/log N), where N is the total number of peers and zeta is proportional to the
        number of matches, required in a search result. The impact of churn problem is
        less severe in DPMS than DHT-based systems. Moreover, DPMS provides guarantee on
        search completeness for moderately stable networks. We demonstrate the
        effectiveness of DPMS using mathematical analysis and simulation results},
  www_section = {matching, P2P, search},
  isbn = {1-4244-0142-9},
  doi = {10.1109/NOMS.2006.1687551},
  url = {http://dx.doi.org/10.1109/NOMS.2006.1687551},
}
Ahn03k-anonymousmessage
@conference{Ahn03k-anonymousmessage,
  title = {k-Anonymous Message Transmission}, 
  author = {Luis von Ahn and Andrew Bortz and Nicholas J. Hopper}, 
  booktitle = {Conference on Computer and Communications Security}, 
  organization = {ACM New York, NY, USA}, 
  year = {2003}, 
  month = {January}, 
  address = {Washington D.C., USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Informally, a communication protocol is sender k--anonymous if it can
        guarantee that an adversary, trying to determine the sender of a particular
        message, can only narrow down its search to a set of k suspects. Receiver
        k-anonymity places a similar guarantee on the receiver: an adversary, at best,
        can only narrow down the possible receivers to a set of size k. In this paper we
        introduce the notions of sender and receiver k-anonymity and consider their
        applications. We show that there exist simple and efficient protocols which are
        k-anonymous for both the sender and the receiver in a model where a polynomial
        time adversary can see all traffic in the network and can control up to a
        constant fraction of the participants. Our protocol is provably secure,
        practical, and does not require the existence of trusted third parties. This
        paper also provides a conceptually simple augmentation to Chaum's DC-Nets that
        adds robustness against adversaries who attempt to disrupt the protocol through
        perpetual transmission or selective non-participation}, 
  isbn = {1-58113-738-9}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.9348\&rep=rep1\&type=url\&i=2},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/k-anonymous_ccs2003.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Ali:2005:PTA:1082473.1082631
@conference{Ali:2005:PTA:1082473.1082631,
  title = {Preprocessing techniques for accelerating the DCOP algorithm ADOPT}, 
  author = {Ali, Syed and Koenig, Sven and Tambe, Milind}, 
  booktitle = {AAMAS'05--Proceedings of the fourth international joint conference on
        Autonomous agents and multiagent systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {July}, 
  address = {Utrecht, Netherlands}, 
  pages = {1041--1048}, 
  publisher = {ACM}, 
  series = {AAMAS '05}, 
  abstract = {Methods for solving Distributed Constraint Optimization Problems (DCOP) have
        emerged as key techniques for distributed reasoning. Yet, their application faces
        significant hurdles in many multiagent domains due to their inefficiency.
        Preprocessing techniques have successfully been used to speed up algorithms for
        centralized constraint satisfaction problems. This paper introduces a framework
        of different preprocessing techniques that are based on dynamic programming and
        speed up ADOPT, an asynchronous complete and optimal DCOP algorithm. We
        investigate when preprocessing is useful and which factors influence the
        resulting speedups in two DCOP domains, namely graph coloring and distributed
        sensor networks. Our experimental results demonstrate that our preprocessing
        techniques are fast and can speed up ADOPT by an order of magnitude}, 
  www_section = {ADOPT algorithm, DCOP, distributed constraint optimization}, 
  isbn = {1-59593-093-0}, 
  doi = {10.1145/1082473.1082631}, 
  url = {http://doi.acm.org/10.1145/1082473.1082631}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS\%2705\%20-\%20Accelerating\%20the\%20DCOP\%20algorithm\%20ADOPT.pdf},
}
Amnefelt04keso-
@mastersthesis{Amnefelt04keso-,
  title = {Keso--a Scalable, Reliable and Secure Read/Write Peer-to-Peer File System}, 
  author = {Mattias Amnefelt and Johanna Svenningsson}, 
  school = {KTH/Royal Institute of Technology}, 
  year = {2004}, 
  month = {May}, 
  address = {Stockholm}, 
  pages = {0--77}, 
  type = {Master's Thesis}, 
  abstract = {In this thesis we present the design of Keso, a distributed and completely
        decentralized file system based on the peer-to-peer overlay network DKS. While
        designing Keso we have taken into account many of the problems that exist in
        today's distributed file systems. Traditionally, distributed file systems have
        been built around dedicated file servers which often use expensive hardware to
        minimize the risk of breakdown and to handle the load. System administrators are
        required to monitor the load and disk usage of the file servers and to manually
        add clients and servers to the system. Another drawback with centralized file
        systems are that a lot of storage space is unused on clients. Measurements we
        have taken on existing computer systems has shown that a large part of the
        storage capacity of workstations is unused. In the system we looked at there was
        three times as much storage space available on workstations than was stored in
        the distributed file system. We have also shown that much data stored in a
        production use distributed file system is redundant. The main goals for the
        design of Keso has been that it should make use of spare resources, avoid storing
        unnecessarily redundant data, scale well, be self-organizing and be a secure file
        system suitable for a real world environment. By basing Keso on peer-to-peer
        techniques it becomes highly scalable, fault tolerant and self-organizing. Keso
        is intended to run on ordinary workstations and can make use of the previously
        unused storage space. Keso also provides means for access control and data
        privacy despite being built on top of untrusted components. The file system
        utilizes the fact that a lot of data stored in traditional file systems is
        redundant by letting all files that contains a datablock with the same contents
        reference the same datablock in the file system. This is achieved while still
        maintaining access control and data privacy}, 
  www_section = {decentralized file system, DKS, Keso}, 
  url = {http://mattias.amnefe.lt/keso/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Amnefelt\%20\%26\%20Svenningsson\%20-\%20Keso.pdf},
}
Anderson96theeternity
@conference{Anderson96theeternity,
  title = {The Eternity Service}, 
  author = {Ross Anderson}, 
  booktitle = {Pragocrypt'96--Proceedings of the 1st International Conference on the Theory
        and Applications of Cryptology}, 
  year = {1996}, 
  month = {September}, 
  address = {Prague, CZ}, 
  pages = {242--252}, 
  abstract = {The Internet was designed to provide a communications channel that is as
        resistant to denial of service attacks as human ingenuity can make it. In this
        note, we propose the construction of a storage medium with similar properties.
        The basic idea is to use redundancy and scattering techniques to replicate data
        across a large set of machines (such as the Internet), and add anonymity
        mechanisms to drive up the cost of selective service denial attacks. The detailed
        design of this service is an interesting scientific problem, and is not merely
        academic: the service may be vital in safeguarding individual rights against new
        threats posed by the spread of electronic publishing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.1952\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/eternity.pdf}, 
  www_section = {Unsorted}, 
}
Andrade04whencan
@conference{Andrade04whencan,
  title = {When Can an Autonomous Reputation Scheme Discourage Free-riding in a
        Peer-to-Peer System?}, 
  author = {Nazareno Andrade and Miranda Mowbray and Walfredo Cirne and Francisco
        Brasileiro}, 
  booktitle = {CCGRID '04: Proceedings of the 2004 IEEE International Symposium on
        Cluster Computing and the Grid, IEEE Computer Society}, 
  year = {2004}, 
  pages = {440--448}, 
  abstract = {We investigate the circumstances under which it is possible to discourage
        free-riding in a peer-to-peer system for resource-sharing by prioritizing
        resource allocation to peers with higher reputation. We use a model to predict
        conditions necessary for any reputation scheme to succeed in discouraging
        free-riding by this method. We show with simulations that for representative
        cases, a very simple autonomous reputation scheme works nearly as well at
        discouraging free-riding as an ideal reputation scheme. Finally, we investigate
        the expected dynamic behavior of the system}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.9659\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Andrade:2005:ICB:1080192.1080198
@conference{Andrade:2005:ICB:1080192.1080198,
  title = {Influences on cooperation in BitTorrent communities}, 
  author = {Nazareno Andrade and Miranda Mowbray and Lima, Aliandro and Wagner, Gustavo and
        Ripeanu, Matei}, 
  booktitle = {P2PEcon'05. Proceedings of the 2005 ACM SIGCOMM workshop on Economics of
        peer-to-peer systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {August}, 
  address = {Philadelphia, Pennsylvania, USA}, 
  pages = {111--115}, 
  publisher = {ACM}, 
  series = {P2PECON '05}, 
  abstract = {We collect BitTorrent usage data across multiple file-sharing communities and
        analyze the factors that affect users' cooperative behavior. We find evidence
        that the design of the BitTorrent protocol results in increased cooperative
        behavior over other P2P protocols used to share similar content (e.g. Gnutella).
        We also investigate two additional community-specific mechanisms that foster even
        more cooperation}, 
  www_section = {BitTorrent, cooperation, P2P}, 
  isbn = {1-59593-026-4}, 
  doi = {10.1145/1080192.1080198}, 
  url = {http://doi.acm.org/10.1145/1080192.1080198}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coopbittorrentcom_0.pdf},
}
Androutsellis-Theotokis:2004:SPC:1041680.1041681
@article{Androutsellis-Theotokis:2004:SPC:1041680.1041681,
  title = {A survey of peer-to-peer content distribution technologies}, 
  author = {Androutsellis-Theotokis, Stephanos and Spinellis, Diomidis}, 
  journal = {ACM Computing Surveys}, 
  volume = {36}, 
  year = {2004}, 
  month = {December}, 
  address = {New York, NY, USA}, 
  pages = {335--371}, 
  publisher = {ACM}, 
  abstract = {Distributed computer architectures labeled "peer-to-peer" are designed for
        the sharing of computer resources (content, storage, CPU cycles) by direct
        exchange, rather than requiring the intermediation or support of a centralized
        server or authority. Peer-to-peer architectures are characterized by their
        ability to adapt to failures and accommodate transient populations of nodes while
        maintaining acceptable connectivity and performance. Content distribution is an
        important peer-to-peer application on the Internet that has received considerable
        research attention. Content distribution applications typically allow personal
        computers to function in a coordinated manner as a distributed storage medium by
        contributing, searching, and obtaining digital content. In this survey, we propose
        a framework for analyzing peer-to-peer content distribution technologies. Our
        approach focuses on nonfunctional characteristics such as security, scalability,
        performance, fairness, and resource management potential, and examines the way in
        which these characteristics are reflected in---and affected by---the
        architectural design decisions adopted by current peer-to-peer systems. We study
        current peer-to-peer systems and infrastructure technologies in terms of their
        distributed object location and routing mechanisms, their approach to content
        replication, caching and migration, their support for encryption, access control,
        authentication and identity, anonymity, deniability, accountability and
        reputation, and their use of resource trading and management schemes}, 
  www_section = {content distribution, distributed hash table, DOLR, grid computing, P2P,
        peer-to-peer networking}, 
  issn = {0360-0300}, 
  doi = {10.1145/1041680.1041681}, 
  url = {http://doi.acm.org/10.1145/1041680.1041681}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20Computing\%20Surveys\%20-\%20A\%20survey\%20of\%20p2p\%20content\%20distribution\%20technologies.pdf},
}
Antoniadis04anasymptotically
@conference{Antoniadis04anasymptotically,
  title = {An Asymptotically Optimal Scheme for P2P File Sharing}, 
  author = {Panayotis Antoniadis and Costas Courcoubetis and Richard Weber}, 
  booktitle = {2nd Workshop on the Economics of Peer-to-Peer Systems}, 
  year = {2004}, 
  month = {January}, 
  address = {Harvard University}, 
  abstract = {The asymptotic analysis of certain public good models for p2p systems
        suggests that when the aim is to maximize social welfare a fixed contribution
        scheme in terms of the number of files shared can be asymptotically optimal as
        the number of participants grows to infinity. Such a simple scheme eliminates
        free riding, is incentive compatible and obtains a value of social welfare that
        is within o(n) of that obtained by the second-best policy of the corresponding
        mechanism design formulation of the problem. We extend our model to account for
        file popularity, and discuss properties of the resulting equilibria. The fact
        that a simple optimization problem can be used to closely approximate the
        solution of the exact model (which is in most cases practically intractable both
        analytically and computationally), is of great importance for studying several
        interesting aspects of the system. We consider the evolution of the system to
        equilibrium in its early life, when both peers and the system planner are still
        learning about system parameters. We also analyse the case of group formation
        when peers belong to different classes (such as DSL and dial-up users), and it
        may be to their advantage to form distinct groups instead of a larger single
        group, or form such a larger group but avoid disclosing their class. We finally
        discuss the game that occurs when peers know that a fixed fee will be used, but
        the distribution of their valuations is unknown to the system designer}, 
  www_section = {asymptotically optimal, P2P, sharing}, 
  url = {http://www.eecs.harvard.edu/p2pecon/confman/papers}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/optimalscheme04.pdf}, 
}
Aspelund05retrivabilityof
@mastersthesis{Aspelund05retrivabilityof,
  title = {Retrivability of data in ad-hoc backup}, 
  author = {Trond Aspelund}, 
  school = {Oslo University}, 
  volume = {Master}, 
  year = {2005}, 
  type = {Master thesis}, 
  abstract = {This master thesis looks at aspects with backup of data and restore in ad-hoc
        networks. Ad-hoc networks are networks made between arbitrary nodes without any
        form of infrastructure or central control. Backup in such environments would have
        to rely on other nodes to keep backups. The key problem is knowing whom to trust.
        Backup in ad-hoc network is meant to be a method to offer extra security to data
        that is created outside of a controlled environment. The most important aspects
        of backup are the ability to retrieve data after it is lost from the original
        device. In this project an ad-hoc network is simulated, to measure how much of
        the data can be retrieved as a function of the size of the network. The distance
        to the data and how many of the distributed copies are available is measured. The
        network is simulated using User-mode Linux and the centrality and connectivity of
        the simulated network is measured. Finding the device that keeps your data when a
        restoration is needed can be like looking for a needle in a haystack. A simple
        solution to this is to not only rely on the ad-hoc network but also make it
        possible for devices that keep backups to upload data to others or back to a host
        that is available to the source itself}, 
  www_section = {ad-hoc networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.106.141}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Retrivability_of_data_in_ad-hoc_backup.pdf},
}
Atallah2006
@conference{Atallah2006,
  title = {Secure Collaborative Planning, Forecasting, and Replenishment}, 
  author = {Atallah, Mikhail and Marina Blanton and Vinayak Deshpand and Frikken, Keith and
        Li, Jiangtao and Leroy Schwarz}, 
  booktitle = {Proceedings of Multi-Echelon/Public Applications of Supply Chain Management
        Conference}, 
  year = {2006}, 
  pages = {1--52}, 
  note = {only published on CD}, 
  abstract = {Although the benefits of information sharing between supply-chain partners
        are well known, many companies are averse to share their
        {\textquotedblleft}private{\textquotedblright} information due to fear of adverse
        impact of information leakage. This paper uses techniques from Secure Multiparty
        Computation (SMC) to develop {\textquotedblleft}secure
        protocols{\textquotedblright} for the CPFR (Collaborative Planning, Forecasting,
        and Replenishment) business process. The result is a process that permits
        supply-chain partners to capture all of the benefits of information-sharing and
        collaborative decision-making, but without disclosing their
        {\textquotedblleft}private{\textquotedblright} demand signal (e.g., promotions)
        and cost information to one another. In our collaborative CPFR scenario, the
        retailer and supplier engage in SMC protocols that result in: (1) a forecast that
        uses both the retailers and the suppliers observed demand signals to better
        forecast demand; and (2) prescribed order/shipment quantities based on
        system-wide costs and inventory levels (and on the joint forecasts) that minimize
        supply-chain expected cost/period. Our contributions are as follows: (1) we
        demonstrate that CPFR can be securely implemented without disclosing the private
        information of either partner; (2) we show that the CPFR business process is not
        incentive compatible without transfer payments and develop an
        incentive-compatible linear transfer-payment scheme for collaborative
        forecasting; (3) we demonstrate that our protocols are not only secure (i.e.,
        privacy preserving), but that neither partner is able to make accurate inferences
        about the others future demand signals from the outputs of the protocols; and (4)
        we illustrate the benefits of secure collaboration using simulation}, 
  www_section = {chain computation management, CPFR, privacy, secure multi-party
        computation, secure supply, security, SMC}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Secure\%20Collaborative\%20Planning\%20Forecasting\%20and\%20Replenishment.pdf},
}
Atallah:2004:PCF:1029179.1029204
@conference{Atallah:2004:PCF:1029179.1029204,
  title = {Private collaborative forecasting and benchmarking}, 
  author = {Atallah, Mikhail and Bykova, Marina and Li, Jiangtao and Frikken, Keith and
        Topkara, Mercan}, 
  booktitle = {WPES'04--Proceedings of the 2004 ACM workshop on Privacy in the electronic
        society}, 
  organization = {ACM}, 
  year = {2004}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  pages = {103--114}, 
  publisher = {ACM}, 
  series = {WPES '04}, 
  abstract = {Suppose a number of hospitals in a geographic area want to learn how their
        own heart-surgery unit is doing compared with the others in terms of mortality
        rates, subsequent complications, or any other quality metric. Similarly, a number
        of small businesses might want to use their recent point-of-sales data to
        cooperatively forecast future demand and thus make more informed decisions about
        inventory, capacity, employment, etc. These are simple examples of cooperative
        benchmarking and (respectively) forecasting that would benefit all participants
        as well as the public at large, as they would make it possible for participants
        to avail themselves of more precise and reliable data collected from many
        sources, to assess their own local performance in comparison to global trends,
        and to avoid many of the inefficiencies that currently arise because of having
        less information available for their decision-making. And yet, in spite of all
        these advantages, cooperative benchmarking and forecasting typically do not take
        place, because of the participants' unwillingness to share their information with
        others. Their reluctance to share is quite rational, and is due to fears of
        embarrassment, lawsuits, weakening their negotiating position (e.g., in case of
        over-capacity), revealing corporate performance and strategies, etc. The
        development and deployment of private benchmarking and forecasting
        technologies would allow such collaborations to take place without revealing any
        participant's data to the others, reaping the benefits of collaboration while
        avoiding the drawbacks. Moreover, this kind of technology would empower smaller
        organizations who could then cooperatively base their decisions on a much broader
        information base, in a way that is today restricted to only the largest
        corporations. This paper is a step towards this goal, as it gives protocols for
        forecasting and benchmarking that reveal to the participants the desired answers
        yet do not reveal to any participant any other participant's private data. We
        consider several forecasting methods, including linear regression and time series
        techniques such as moving average and exponential smoothing. One of the novel
        parts of this work, that further distinguishes it from previous work in secure
        multi-party computation, is that it involves floating point arithmetic, in
        particular it provides protocols to securely and efficiently perform division}, 
  www_section = {benchmarking, e-commerce, forecasting, privacy, secure multi-party
        computation, secure protocol, SMC}, 
  isbn = {1-58113-968-3}, 
  doi = {10.1145/1029179.1029204}, 
  url = {http://doi.acm.org/10.1145/1029179.1029204}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2704\%20-\%20Forecasting\%20and\%20benchamking.pdf},
}
AthanRAM07
@conference{AthanRAM07,
  title = {GAS: Overloading a File Sharing Network as an Anonymizing System}, 
  author = {Elias Athanasopoulos and Mema Roussopoulos and Kostas G. Anagnostakis and
        Evangelos P. Markatos}, 
  booktitle = {Proceedings of Second International Workshop on Security, (IWSEC 2007)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2007}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Anonymity is considered as a valuable property as far as everyday
        transactions in the Internet are concerned. Users care about their privacy and
        they seek for new ways to keep secret as much as of their personal information
        from third parties. Anonymizing systems exist nowadays that provide users with
        the technology, which is able to hide their origin when they use applications
        such as the World Wide Web or Instant Messaging. However, all these systems are
        vulnerable to a number of attacks and some of them may collapse under a low
        strength adversary. In this paper we explore anonymity from a different
        perspective. Instead of building a new anonymizing system, we try to overload an
        existing file sharing system, Gnutella, and use it for a different purpose. We
        develop a technique that transforms Gnutella as an Anonymizing System (GAS) for a
        single download from the World Wide Web}, 
  www_section = {anonymity, Gnutella}, 
  isbn = {978-3-540-75650-7}, 
  doi = {10.1007/978-3-540-75651-4}, 
  url = {http://www.springerlink.com/content/8120788t0l354vj6/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AthanRAM07.pdf}, 
}
Attrapadung:2009:AES:1696791.1696811
@conference{Attrapadung:2009:AES:1696791.1696811,
  title = {Attribute-Based Encryption Supporting Direct/Indirect Revocation Modes}, 
  author = {Attrapadung, Nuttapong and Imai, Hideki}, 
  booktitle = {Proceedings of the 12th IMA International Conference on Cryptography and
        Coding}, 
  organization = {Springer-Verlag}, 
  year = {2009}, 
  month = {December}, 
  address = {Cirencester, UK}, 
  pages = {278--300}, 
  publisher = {Springer-Verlag}, 
  series = {Cryptography and Coding '09}, 
  abstract = {Attribute-based encryption (ABE) enables an access control mechanism over
        encrypted data by specifying access policies among private keys and ciphertexts.
        In this paper, we focus on ABE that supports revocation. Currently, there are two
        available revocable ABE schemes in the literature. Their revocation mechanisms,
        however, differ in the sense that they can be considered as direct and indirect
        methods. Direct revocation enforces revocation directly by the sender
        who specifies the revocation list while encrypting. Indirect revocation
        enforces revocation by the key authority who releases a key update material
        periodically in such a way that only non-revoked users can update their keys
        (hence, revoked users' keys are implicitly rendered useless). An advantage of the
        indirect method over the direct one is that it does not require senders to know
        the revocation list. In contrast, an advantage of the direct method over the
        other is that it does not involve key update phase for all non-revoked users
        interacting with the key authority. In this paper, we present the first
        Hybrid Revocable ABE scheme that allows senders to select on-the-fly
        when encrypting whether to use either direct or indirect revocation mode;
        therefore, it combines best advantages from both methods}, 
  isbn = {978-3-642-10867-9}, 
  doi = {10.1007/978-3-642-10868-6_17}, 
  url = {http://dx.doi.org/10.1007/978-3-642-10868-6_17}, 
  www_section = {Unsorted}, 
}
Awerbuch04robustdistributed
@conference{Awerbuch04robustdistributed,
  title = {Robust Distributed Name Service}, 
  author = {Awerbuch, Baruch}, 
  booktitle = {Proc. of the 3rd International Workshop on Peer-to-Peer Systems (IPTPS)}, 
  year = {2004}, 
  pages = {1--8}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.142.4900}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/awerbuch-robust.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
BM:mixencrypt
@conference{BM:mixencrypt,
  title = {Provably Secure Public-Key Encryption for Length-Preserving Chaumian Mixes}, 
  author = {Bodo M{\"o}ller}, 
  booktitle = {Proceedings of CT-RSA 2003}, 
  organization = {Springer-Verlag, LNCS 2612}, 
  year = {2003}, 
  month = {April}, 
  publisher = {Springer-Verlag, LNCS 2612}, 
  abstract = {Mix chains as proposed by Chaum allow sending untraceable electronic e-mail
        without requiring trust in a single authority: messages are recursively
        public-key encrypted to multiple intermediates (mixes), each of which forwards
        the message after removing one layer of encryption. To conceal as much
        information as possible when using variable (source routed) chains, all messages
        passed to mixes should be of the same length; thus, message length should not
        decrease when a mix transforms an input message into the corresponding output
        message directed at the next mix in the chain. Chaum described an implementation
        for such length-preserving mixes, but it is not secure against active attacks. We
        show how to build practical cryptographically secure length-preserving mixes. The
        conventional definition of security against chosen ciphertext attacks is not
        applicable to length-preserving mixes; we give an appropriate definition and show
        that our construction achieves provable security}, 
  www_section = {mix chain, public key cryptography}, 
  url = {http://eprints.kfupm.edu.sa/59837/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BM-mixencrypt.pdf}, 
}
Badishi:2009:DFC:1550962.1551186
@article{Badishi:2009:DFC:1550962.1551186,
  title = {Deleting files in the Celeste peer-to-peer storage system}, 
  author = {Badishi, Gal and Caronni, Germano and Keidar, Idit and Rom, Raphael and Scott,
        Glenn}, 
  journal = {Journal of Parallel and Distributed Computing}, 
  volume = {69}, 
  year = {2009}, 
  month = {July}, 
  address = {Orlando, FL, USA}, 
  pages = {613--622}, 
  publisher = {Academic Press, Inc}, 
  abstract = {Celeste is a robust peer-to-peer object store built on top of a distributed
        hash table (DHT). Celeste is a working system, developed by Sun Microsystems
        Laboratories. During the development of Celeste, we faced the challenge of
        complete object deletion, and moreover, of deleting ''files'' composed of several
        different objects. This important problem is not solved by merely deleting
        meta-data, as there are scenarios in which all file contents must be deleted,
        e.g., due to a court order. Complete file deletion in a realistic peer-to-peer
        storage system has not been previously dealt with due to the intricacy of the
        problem--the system may experience high churn rates, nodes may crash or have
        intermittent connectivity, and the overlay network may become partitioned at
        times. We present an algorithm that eventually deletes all file contents, data
        and meta-data, in the aforementioned complex scenarios. The algorithm is fully
        functional and has been successfully integrated into Celeste}, 
  www_section = {Celeste, fault-tolerance, peer-to-peer networking, storage}, 
  issn = {0743-7315}, 
  doi = {10.1016/j.jpdc.2009.03.003}, 
  url = {http://dl.acm.org/citation.cfm?id=1550962.1551186}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Parallel\%20\%26\%20Distributed\%20Computing\%20-\%20Deleting\%20files\%20in\%20the\%20Celeste\%20p2p\%20storage\%20systems.pdf},
}
Banner:2007:MRA:1279660.1279673
@article{Banner:2007:MRA:1279660.1279673,
  title = {Multipath routing algorithms for congestion minimization}, 
  author = {Banner, Ron and Orda, Ariel}, 
  journal = {IEEE/ACM Trans. Netw}, 
  volume = {15}, 
  year = {2007}, 
  month = {April}, 
  address = {Piscataway, NJ, USA}, 
  pages = {413--424}, 
  publisher = {IEEE Press}, 
  abstract = {Unlike traditional routing schemes that route all traffic along a single
        path, multipath routing strategies split the traffic among several paths in order
        to ease congestion. It has been widely recognized that multipath routing can be
        fundamentally more efficient than the traditional approach of routing along
        single paths. Yet, in contrast to the single-path routing approach, most studies
        in the context of multipath routing focused on heuristic methods. We demonstrate
        the significant advantage of optimal (or near optimal) solutions. Hence, we
        investigate multipath routing adopting a rigorous (theoretical) approach. We
        formalize problems that incorporate two major requirements of multipath routing.
        Then, we establish the intractability of these problems in terms of computational
        complexity. Finally, we establish efficient solutions with proven performance
        guarantees}, 
  www_section = {computer networks, congestion avoidance, routing protocols}, 
  issn = {1063-6692}, 
  doi = {10.1109/TNET.2007.892850}, 
  url = {http://dx.doi.org/10.1109/TNET.2007.892850}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Banner\%20\%26\%20Orda.pdf},
}
Barreto04areplicated
@conference{Barreto04areplicated,
  title = {A Replicated File System for Resource Constrained Mobile Devices}, 
  author = {Jo{\~a}o Barreto and Paulo Ferreira}, 
  booktitle = {Proceedings of IADIS Applied Computing}, 
  year = {2004}, 
  abstract = {The emergence of more powerful and resourceful mobile devices, as well as new
        wireless communication technologies, is turning the concept of ad-hoc networking
        into a viable and promising possibility for ubiquitous information sharing.
        However, the inherent characteristics of ad-hoc networks bring up new challenges
        for which most conventional systems don't provide an appropriate response.
        Namely, the lack of a pre-existing infrastructure, the high topological dynamism
        of these networks, the relatively low bandwidth of wireless links, as well as the
        limited storage and energy resources of mobile devices are issues that strongly
        affect the efficiency of any distributed system intended to provide ubiquitous
        information sharing. In this paper we describe Haddock-FS, a transparent
        replicated file system designed to support collaboration in the novel usage
        scenarios enabled by mobile environments. Haddock-FS is based on a highly
        available optimistic consistency protocol. In order to effectively cope with the
        network bandwidth and device memory constraints of these environments, Haddock-FS
        employs a limited size log truncation scheme and a cross-file, cross-version
        content similarity exploitation mechanism}, 
  www_section = {ad-hoc networks, ubiquitous computing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.144.9141}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.144.9141.pdf}, 
}
Bartolini:2005:SFA:2167504.2167521
@book{Bartolini:2005:SFA:2167504.2167521,
  title = {A software framework for automated negotiation}, 
  author = {Bartolini, Claudio and Preist, Chris and Jennings, Nicholas R.}, 
  booktitle = {Software Engineering for Multi-Agent Systems III}, 
  organization = {Springer-Verlag}, 
  volume = {3390}, 
  year = {2005}, 
  address = {Berlin, Heidelberg}, 
  chapter = {A software framework for automated negotiation}, 
  pages = {213--235}, 
  editor = {Choren, Ricardo and Garcia, Alessandro and Lucena, Carlos and Romanovsky,
        Alexander}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {If agents are to negotiate automatically with one another they must share a
        negotiation mechanism, specifying what possible actions each party can take at
        any given time, when negotiation terminates, and what is the structure of the
        resulting agreements. Current standardization activities such as FIPA [2] and
        WS-Agreement [3] represent this as a negotiation protocol specifying the flow of
        messages. However, they omit other aspects of the rules of negotiation (such as
        obliging a participant to improve on a previous offer), requiring these to be
        represented implicitly in an agent's design, potentially resulting
        incompatibility, maintenance and re-usability problems. In this chapter, we
        propose an alternative approach, allowing all of a mechanism to be formal and
        explicit. We present (i) a taxonomy of declarative rules which can be used to
        capture a wide variety of negotiation mechanisms in a principled and
        well-structured way; (ii) a simple interaction protocol, which is able to support
        any mechanism which can be captured using the declarative rules; (iii) a software
        framework for negotiation that allows agents to effectively participate in
        negotiations defined using our rule taxonomy and protocol and (iv) a language for
        expressing aspects of the negotiation based on OWL-Lite [4]. We provide examples
        of some of the mechanisms that the framework can support}, 
  www_section = {framework, negotiation}, 
  isbn = {3-540-24843-9}, 
  url = {http://dl.acm.org/citation.cfm?id=2167504.2167521}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SELMAS\%20-\%20Software\%20framework\%20for\%20automated\%20negotiation.pdf},
}
Baset04ananalysis
@conference{Baset04ananalysis,
  title = {An Analysis of the Skype Peer-to-Peer Internet Telephony Protocol}, 
  author = {Salman A. Baset and Henning G. Schulzrinne}, 
  booktitle = {INFOCOM 2006. Proceedings of the 25th Annual Joint Conference of the IEEE
        Computer and Communications Societies}, 
  year = {2006}, 
  month = {April}, 
  address = {Barcelona, Catalunya, Spain}, 
  abstract = {Skype is a peer-to-peer VoIP client developed by KaZaa in 2003. Skype claims
        that it can work almost seamlessly across NATs and firewalls and has better voice
        quality than the MSN and Yahoo IM applications. It encrypts calls end-to-end, and
        stores user information in a decentralized fashion. Skype also supports instant
        messaging and conferencing. This report analyzes key Skype functions such as
        login, NAT and firewall traversal, call establishment, media transfer, codecs,
        and conferencing under three different network setups. Analysis is performed by
        careful study of Skype network traffic}, 
  www_section = {P2P, VoIP}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.84.2433}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cucs-039-04.pdf}, 
}
Batten01pstore:a
@booklet{Batten01pstore:a,
  title = {pStore: A Secure Peer-to-Peer Backup System}, 
  author = {Christopher Batten and Kenneth Barr and Arvind Saraf and Stanley Trepetin}, 
  year = {2001}, 
  abstract = {In an effort to combine research in peer-to-peer systems with techniques for
        incremental backup systems, we propose pStore: a secure distributed backup system
        based on an adaptive peer-to-peer network. pStore exploits unused personal hard
        drive space attached to the Internet to provide the distributed redundancy needed
        for reliable and effective data backup. Experiments on a 30 node network show
        that 95\% of the files in a 13 MB dataset can be retrieved even when 7 of the
        nodes have failed. On top of this reliability, pStore includes support for file
        encryption, versioning, and secure sharing. Its custom versioning system permits
        arbitrary version retrieval similar to CVS. pStore provides this functionality at
        less than 10\% of the network bandwidth and requires 85\% less storage capacity
        than simpler local tape backup schemes for a representative workload}, 
  www_section = {P2P, robustness}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.3444}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3444.pdf}, 
}
Bauer03newcovert
@conference{Bauer03newcovert,
  title = {New Covert Channels in HTTP: Adding Unwitting Web Browsers to Anonymity Sets}, 
  author = {Matthias Bauer}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2003)}, 
  organization = {ACM Press}, 
  year = {2003}, 
  pages = {72--78}, 
  publisher = {ACM Press}, 
  abstract = {This paper presents new methods enabling anonymous communication on the
        Internet. We describe a new protocol that allows us to create an anonymous
        overlay network by exploiting the web browsing activities of regular users. We
        show that the overlay network provides an anonymity set greater than the set of
        senders and receivers in a realistic threat model. In particular, the protocol
        provides unobservability in our threat model}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.6246\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
BecchiCrowley2008EfficientRegexEval
@conference{BecchiCrowley2008EfficientRegexEval,
  title = {Efficient regular expression evaluation: theory to practice}, 
  author = {Becchi, Michela and Crowley, Patrick}, 
  booktitle = {Proceedings of the 4th ACM/IEEE Symposium on Architectures for Networking
        and Communications Systems}, 
  organization = {ACM}, 
  year = {2008}, 
  address = {New York, NY, USA}, 
  pages = {50--59}, 
  publisher = {ACM}, 
  series = {ANCS '08}, 
  isbn = {978-1-60558-346-4}, 
  doi = {10.1145/1477942.1477950}, 
  url = {http://doi.acm.org/10.1145/1477942.1477950}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Beimel01busesfor
@article{Beimel01busesfor,
  title = {Buses for Anonymous Message Delivery}, 
  author = {Amos Beimel and Shlomi Dolev}, 
  journal = {Journal of Cryptology}, 
  volume = {16}, 
  year = {2003}, 
  pages = {25--39}, 
  abstract = {Applies graph theory to anonymity. The paper suffers from the fundamental
        problem that it does not discuss attacks on the scheme, and there are a couple of
        pretty basic ways to break anonymity. Also, the scheme uses lots of traffic; some
        variants end up looking much like a pipenet}, 
  url = {http://gecko.cs.purdue.edu/gnet/papers/BD.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BD.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Bellovin2007
@techreport{Bellovin2007,
  title = {Privacy-enhanced searches using encrypted Bloom filters}, 
  author = {Bellovin, Steven M. and Cheswick, William R.}, 
  number = {CUCS-034-07}, 
  year = {2007}, 
  pages = {1--16}, 
  institution = {Columbia University}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Ben-David:2008:FSS:1455770.1455804
@conference{Ben-David:2008:FSS:1455770.1455804,
  title = {FairplayMP: a system for secure multi-party computation}, 
  author = {Ben-David, Assaf and Nisan, Noam and Pinkas, Benny}, 
  booktitle = {CCS'08--Proceedings of the 15th ACM conference on Computer and
        communications security}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {October}, 
  address = {Alexandria, VA, USA}, 
  pages = {257--266}, 
  publisher = {ACM}, 
  series = {CCS '08}, 
  abstract = {We present FairplayMP (for "Fairplay Multi-Party"), a system for secure
        multi-party computation. Secure computation is one of the great achievements of
        modern cryptography, enabling a set of untrusting parties to compute any function
        of their private inputs while revealing nothing but the result of the function.
        In a sense, FairplayMP lets the parties run a joint computation that emulates a
        trusted party which receives the inputs from the parties, computes the function,
        and privately informs the parties of their outputs. FairplayMP operates by
        receiving a high-level language description of a function and a configuration
        file describing the participating parties. The system compiles the function into
        a description as a Boolean circuit, and perform a distributed evaluation of the
        circuit while revealing nothing else. FairplayMP supplements the Fairplay system
        [16], which supported secure computation between two parties. The underlying
        protocol of FairplayMP is the Beaver-Micali-Rogaway (BMR) protocol which runs in
        a constant number of communication rounds (eight rounds in our implementation).
        We modified the BMR protocol in a novel way and considerably improved its
        performance by using the Ben-Or-Goldwasser-Wigderson (BGW) protocol for the
        purpose of constructing gate tables. We chose to use this protocol since we
        believe that the number of communication rounds is a major factor on the overall
        performance of the protocol. We conducted different experiments which measure the
        effect of different parameters on the performance of the system and demonstrate
        its scalability. (We can now tell, for example, that running a second-price
        auction between four bidders, using five computation players, takes about 8
        seconds.)}, 
  www_section = {cryptography, secure multi-party computation, SMC}, 
  isbn = {978-1-59593-810-7}, 
  doi = {10.1145/1455770.1455804}, 
  url = {http://doi.acm.org/10.1145/1455770.1455804}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20FairplayMP.pdf},
}
Bharambe:2005:OBP:1064212.1064273
@conference{Bharambe:2005:OBP:1064212.1064273,
  title = {Some observations on BitTorrent performance}, 
  author = {Bharambe, Ashwin R. and Herley, Cormac and Padmanabhan, Venkata N.}, 
  booktitle = {Proceedings of the 2005 ACM SIGMETRICS International Conference on
        Measurement and Modeling of Computer Systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {June}, 
  address = {New York, NY, USA}, 
  pages = {398--399}, 
  publisher = {ACM}, 
  series = {SIGMETRICS '05}, 
  abstract = {In this paper, we present a simulation-based study of BitTorrent. Our results
        confirm that BitTorrent performs near-optimally in terms of uplink bandwidth
        utilization and download time, except under certain extreme conditions. On
        fairness, however, our work shows that low bandwidth peers systematically
        download more than they upload to the network when high bandwidth peers are
        present. We find that the rate-based tit-for-tat policy is not effective in
        preventing unfairness. We show how simple changes to the tracker and a stricter,
        block-based tit-for-tat policy, greatly improves fairness, while maintaining high
        utilization}, 
  www_section = {bandwidth utilization, BitTorrent, fairness}, 
  isbn = {1-59593-022-1}, 
  doi = {10.1145/1064212.1064273}, 
  url = {http://doi.acm.org/10.1145/1064212.1064273}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGMETRICS\%2705\%20-\%20Bharambe\%2C\%20Herley\%20\%26\%20Padmanabhan.pdf},
}
Bickson05theemule
@techreport{Bickson05theemule,
  title = {The eMule Protocol Specification}, 
  author = {Yoram Kulbak and Danny Bickson}, 
  institution = {Leibniz Center, School of Computer Science and Engineering, The Hebrew
        University}, 
  number = {TR-2005-03}, 
  year = {2005}, 
  month = {January}, 
  address = {Jerusalem, Israel}, 
  type = {Tech report}, 
  abstract = {this document under the terms of the GNU Free Documentation License, Version
        1.2 or any later version published by the Free Software Foundation; with no
        Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the
        license is included in the section entitle "GNU Free Documentation License"}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.7750}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.60.7750_0.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Binzenhofer:2007:ECS:1769187.1769257
@conference{Binzenhofer:2007:ECS:1769187.1769257,
  title = {Estimating churn in structured P2P networks}, 
  author = {Binzenh{\"o}fer, Andreas and Leibnitz, Kenji}, 
  booktitle = {ITC-20'07--Proceedings of the 20th International Teletraffic Conference on
        Managing Traffic Performance in Converged Networks}, 
  organization = {Springer-Verlag}, 
  year = {2007}, 
  month = {June}, 
  address = {Ottawa, Canada}, 
  pages = {630--641}, 
  publisher = {Springer-Verlag}, 
  series = {ITC20'07}, 
  abstract = {In structured peer-to-peer (P2P) networks participating peers can join or
        leave the system at arbitrary times, a process which is known as churn. Many
        recent studies revealed that churn is one of the main problems faced by any
        Distributed Hash Table (DHT). In this paper we discuss different possibilities of
        how to estimate the current churn rate in the system. In particular, we show how
        to obtain a robust estimate which is independent of the implementation details of
        the DHT. We also investigate the trade-offs between accuracy, overhead, and
        responsiveness to changes}, 
  www_section = {churn, distributed hash table, P2P, peer-to-peer networking}, 
  isbn = {978-3-540-72989-1}, 
  url = {http://dl.acm.org/citation.cfm?id=1769187.1769257}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ITC-20\%2707\%20-\%20Estimating\%20churn\%20in\%20structured\%20p2p\%20networks.pdf},
}
Blake:2003:HAS:1251054.1251055
@conference{Blake:2003:HAS:1251054.1251055,
  title = {High Availability, Scalable Storage, Dynamic Peer Networks: Pick Two}, 
  author = {Blake, Charles and Rodrigues, Rodrigo}, 
  booktitle = {HotOS IX--Proceedings of the 9th conference on Hot Topics in Operating
        Systems}, 
  organization = {USENIX Association}, 
  year = {2003}, 
  month = {May}, 
  address = {Lihue, Hawaii, USA}, 
  pages = {1--1}, 
  publisher = {USENIX Association}, 
  abstract = {Peer-to-peer storage aims to build large-scale, reliable and available
        storage from many small-scale unreliable, low-availability distributed hosts.
        Data redundancy is the key to any data guarantees. However, preserving redundancy
        in the face of highly dynamic membership is costly. We use a simple resource
        usage model to measured behavior from the Gnutella file-sharing network to argue
        that large-scale cooperative storage is limited by likely dynamics and
        cross-system bandwidth -- not by local disk space. We examine some bandwidth
        optimization strategies like delayed response to failures, admission control, and
        load-shifting and find that they do not alter the basic problem. We conclude that
        when redundancy, data scale, and dynamics are all high, the needed cross-system
        bandwidth is unreasonable}, 
  www_section = {distributed hosts, dynamic peer network, peer-to-peer storage,
        redundancy}, 
  url = {http://dl.acm.org/citation.cfm?id=1251054.1251055}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HotOS\%20IX\%20-\%20High\%20available\%2C\%20scalable\%20storage\%2C\%20dynamic\%20peer\%20networks.pdf},
}
Bloom70space/timetrade-offs
@article{Bloom70space/timetrade-offs,
  title = {Space/Time Trade-offs in Hash Coding with Allowable Errors}, 
  author = {Burton H. Bloom}, 
  journal = {Communications of the ACM}, 
  volume = {13}, 
  year = {1970}, 
  pages = {422--426}, 
  abstract = {this paper trade-offs among certain computational factors in hash coding are
        analyzed. The paradigm problem considered is that of testing a series of messages
        one-by-one for membership in a given set of messages. Two new hash- coding
        methods are examined and compared with a particular conventional hash-coding
        method. The computational factors considered are the size of the hash area
        (space), the time required to identify a message as a nonmember of the given set
        (reject time), and an allowable error frequency}, 
  www_section = {Bloom filter, compression}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.2080\&rep=rep1\&type=pdf},
}
Bogetoft:2009:SMC:1601990.1602018
@book{Bogetoft:2009:SMC:1601990.1602018,
  title = {Financial Cryptography and Data Security}, 
  author = {Bogetoft, Peter and Christensen, Dan Lund and Damg{\'a}rd, Ivan and Geisler,
        Martin and Jakobsen, Thomas and Kr{\o}igaard, Mikkel and Nielsen, Janus Dam and
        Nielsen, Jesper Buus and Nielsen, Kurt and Pagter, Jakob and Schwartzbach,
        Michael and Toft, Tomas}, 
  booktitle = {Financial Cryptography and Data Security}, 
  organization = {Springer-Verlag}, 
  volume = {6052}, 
  year = {2009}, 
  address = {Berlin, Heidelberg}, 
  chapter = {Secure Multiparty Computation Goes Live}, 
  edition = {1st}, 
  pages = {325--343}, 
  editor = {Roger Dingledine and Philippe Golle}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This book constitutes the thoroughly refereed post-conference proceedings of
        the 14th International Conference on Financial Cryptography and Data Security, FC
        2010, held in Tenerife, Canary Islands, Spain in January 2010. The 19 revised
        full papers and 15 revised short papers presented together with 1 panel report
        and 7 poster papers were carefully reviewed and selected from 130 submissions.
        The papers cover all aspects of securing transactions and systems and feature
        current research focusing on both fundamental and applied real-world deployments
        on all aspects surrounding commerce security}, 
  www_section = {anonymous credentials, bilinear group, privacy, secret sharing, SMC,
        symbolic evaluation}, 
  isbn = {978-3-642-03548-7}, 
  doi = {10.1007/978-3-642-03549-4_20}, 
  url = {http://dx.doi.org/10.1007/978-3-642-03549-4_20}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Bogetoft\%20et\%20al.\%20-\%20Secure\%20multiparty\%20computation\%20goes\%20live.pdf},
}
Boldyreva:2008:IEE:1455770.1455823
@conference{Boldyreva:2008:IEE:1455770.1455823,
  title = {Identity-based encryption with efficient revocation}, 
  author = {Boldyreva, Alexandra and Goyal, Vipul and Kumar, Virendra}, 
  booktitle = {CCS'08--Proceedings of the 15th ACM Conference on Computer and
        Communications Security}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {October}, 
  address = {Alexandria, VA, USA}, 
  pages = {417--426}, 
  publisher = {ACM}, 
  series = {CCS '08}, 
  abstract = {Identity-based encryption (IBE) is an exciting alternative to public-key
        encryption, as IBE eliminates the need for a Public Key Infrastructure (PKI). The
        senders using an IBE do not need to look up the public keys and the corresponding
        certificates of the receivers, the identities (e.g. emails or IP addresses) of
        the latter are sufficient to encrypt. Any setting, PKI- or identity-based, must
        provide a means to revoke users from the system. Efficient revocation is a
        well-studied problem in the traditional PKI setting. However in the setting of
        IBE, there has been little work on studying the revocation mechanisms. The most
        practical solution requires the senders to also use time periods when encrypting,
        and all the receivers (regardless of whether their keys have been compromised or
        not) to update their private keys regularly by contacting the trusted authority.
        We note that this solution does not scale well -- as the number of users
        increases, the work on key updates becomes a bottleneck. We propose an IBE scheme
        that significantly improves key-update efficiency on the side of the trusted
        party (from linear to logarithmic in the number of users), while staying
        efficient for the users. Our scheme builds on the ideas of the Fuzzy IBE
        primitive and binary tree data structure, and is provably secure}, 
  www_section = {IBE, identity-based encryption, provable security, revocation}, 
  isbn = {978-1-59593-810-7}, 
  doi = {10.1145/1455770.1455823}, 
  url = {http://doi.acm.org/10.1145/1455770.1455823}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2708\%20-\%20Identity-based\%20encryption\%20with\%20efficient\%20revocation.pdf},
}
BonehGolle:psp2002
@conference{BonehGolle:psp2002,
  title = {Almost Entirely Correct Mixing With Application to Voting}, 
  author = {Dan Boneh and Philippe Golle}, 
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications
        Security (CCS 2002)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2002}, 
  month = {November}, 
  address = {Washington, DC}, 
  pages = {68--77}, 
  editor = {Vijay Atluri}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {In order to design an exceptionally efficient mix network, both
        asymptotically and in real terms, we develop the notion of almost entirely
        correct mixing, and propose a new mix network that is almost entirely correct. In
        our new mix, the real cost of proving correctness is orders of magnitude faster
        than all other mix nets. The trade-off is that our mix only guarantees "almost
        entirely correct" mixing, i.e it guarantees that the mix network processed
        correctly all inputs with high (but not overwhelming) probability. We use a new
        technique for verifying correctness. This new technique consists of computing the
        product of a random subset of the inputs to a mix server, then require the mix
        server to produce a subset of the outputs of equal product. Our new mix net is of
        particular value for electronic voting, where a guarantee of almost entirely
        correct mixing may well be sufficient to announce instantly the result of a large
        election. The correctness of the result can later be verified beyond a doubt
        using any one of a number of much slower proofs of perfect-correctness, without
        having to mix the ballots again}, 
  www_section = {electronic voting}, 
  isbn = {1-58113-612-9}, 
  doi = {10.1145/586110.586121}, 
  url = {http://portal.acm.org/citation.cfm?id=586121}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BonehGolle-psp2002.pdf},
}
Borisov:CSD-05-1390
@techreport{Borisov:CSD-05-1390,
  title = {Anonymity in Structured Peer-to-Peer Networks}, 
  author = {Borisov, Nikita and Waddle, Jason}, 
  number = {UCB/CSD-05-1390}, 
  year = {2005}, 
  month = {May}, 
  institution = {EECS Department, University of California, Berkeley}, 
  abstract = {Existing peer-to-peer systems that aim to provide anonymity to its users are
        based on networks with unstructured or loosely-structured routing algorithms.
        Structured routing offers performance and robustness guarantees that these
        systems are unable to achieve. We therefore investigate adding anonymity support
        to structured peer-to-peer networks. We apply an entropy-based anonymity metric
        to Chord and use this metric to quantify the improvements in anonymity afforded
        by several possible extensions. We identify particular properties of Chord that
        have the strongest effect on anonymity and propose a routing extension that
        allows a general trade-off between anonymity and performance. Our results should
        be applicable to other structured peer-to-peer systems}, 
  url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2005/6509.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-05-1390.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Boulkenafed02adhocfs:sharing
@booklet{Boulkenafed02adhocfs:sharing,
  title = {AdHocFS: Sharing Files in WLANs}, 
  author = {Malika Boulkenafed and Valerie Issarny}, 
  year = {2002}, 
  abstract = {This paper presents the ADHOCFS file system for mobileusers, which realizes
        transparent, adaptive file accessaccording to the users' specific situations
        (e.g., device inuse, network connectivity, etc).The paper concentratesmore
        specifically on the support of ADHOCFS for collaborativefile sharing within ad
        hoc groups of trusted nodesthat are in the local communication of each other
        using theunderlying ad hoc network, which has not been addressedin the past}, 
  www_section = {ad-hoc networks}, 
  isbn = {0-7695-1938-5}, 
  url = {http://portal.acm.org/citation.cfm?id=825345}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9956.pdf}, 
}
Broadening2013Chatzikokolakis
@book{Broadening2013Chatzikokolakis,
  title = {Broadening the Scope of Differential Privacy Using Metrics}, 
  author = {Chatzikokolakis, Konstantinos and Andr{\'e}s, Miguel E. and Bordenabe,
        Nicol{\'a}s Emilio and Palamidessi, Catuscia}, 
  booktitle = {Privacy Enhancing Technologies}, 
  organization = {Springer Berlin Heidelberg}, 
  volume = {7981}, 
  year = {2013}, 
  pages = {82--102}, 
  editor = {De Cristofaro, Emiliano and Wright, Matthew}, 
  publisher = {Springer Berlin Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Differential Privacy is one of the most prominent frameworks used to deal
        with disclosure prevention in statistical databases. It provides a formal privacy
        guarantee, ensuring that sensitive information relative to individuals cannot be
        easily inferred by disclosing answers to aggregate queries. If two databases are
        adjacent, i.e. differ only for an individual, then the query should not allow to
        tell them apart by more than a certain factor. This induces a bound also on the
        distinguishability of two generic databases, which is determined by their
        distance on the Hamming graph of the adjacency relation. In this paper we explore
        the implications of differential privacy when the indistinguishability
        requirement depends on an arbitrary notion of distance. We show that we can
        naturally express, in this way, (protection against) privacy threats that cannot
        be represented with the standard notion, leading to new applications of the
        differential privacy framework. We give intuitive characterizations of these
        threats in terms of Bayesian adversaries, which generalize two interpretations of
        (standard) differential privacy from the literature. We revisit the well-known
        results stating that universally optimal mechanisms exist only for counting
        queries: We show that, in our extended setting, universally optimal mechanisms
        exist for other queries too, notably sum, average, and percentile queries. We
        explore various applications of the generalized definition, for statistical
        databases as well as for other areas, such that geolocation and smart metering}, 
  isbn = {978-3-642-39076-0}, 
  doi = {10.1007/978-3-642-39077-7_5}, 
  url = {http://dx.doi.org/10.1007/978-3-642-39077-7_5}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brodening2013Chatzikokolakis.pdf},
  www_section = {Unsorted}, 
}
Buchegger03theeffect
@conference{Buchegger03theeffect,
  title = {The Effect of Rumor Spreading in Reputation Systems for Mobile Ad-Hoc Networks}, 
  author = {Sonja Buchegger and Jean-Yves Le Boudec}, 
  booktitle = {Proceedings of WiOpt'03: Modeling and Optimization in Mobile, Ad Hoc and
        Wireless Networks, Sophia-Antipolis}, 
  year = {2003}, 
  abstract = {Mobile ad-hoc networks rely on the cooperation of nodes for routing and
        forwarding. For individual nodes there are however several advantages resulting
        from noncooperation, the most obvious being power saving. Nodes that act
        selfishly or even maliciously pose a threat to availability in mobile ad-hoc
        networks. Several approaches have been proposed to detect noncooperative nodes.
        In this paper, we investigate the effect of using rumors with respect to the
        detection time of misbehaved nodes as well as the robustness of the reputation
        system against wrong accusations. We propose a Bayesian approach for reputation
        representation, updates, and view integration. We also present a mechanism to
        detect and exclude potential lies. The simulation results indicate that by using
        this Bayesian approach, the reputation system is robust against slander while
        still benefitting from the speed-up in detection time provided by the use of
        rumors}, 
  www_section = {ad-hoc networks, reputation, robustness}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.9006}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.9006_0.pdf}, 
}
Burkhart:2010:SPA:1929820.1929840
@conference{Burkhart:2010:SPA:1929820.1929840,
  title = {SEPIA: privacy-preserving aggregation of multi-domain network events and
        statistics},
  author = {Burkhart, Martin and Strasser, Mario and Many, Dilip and Dimitropoulos,
        Xenofontas},
  booktitle = {Proceedings of the 19th USENIX conference on Security},
  organization = {USENIX Association},
  year = {2010},
  month = aug,
  address = {Washington, DC, USA},
  pages = {15--15},
  publisher = {USENIX Association},
  series = {USENIX Security'10},
  abstract = {Secure multiparty computation (MPC) allows joint privacy-preserving
        computations on data of multiple parties. Although MPC has been studied
        substantially, building solutions that are practical in terms of computation and
        communication cost is still a major challenge. In this paper, we investigate the
        practical usefulness of MPC for multi-domain network security and monitoring. We
        first optimize MPC comparison operations for processing high volume data in near
        real-time. We then design privacy-preserving protocols for event correlation and
        aggregation of network traffic statistics, such as addition of volume metrics,
        computation of feature entropy, and distinct item count. Optimizing performance
        of parallel invocations, we implement our protocols along with a complete set of
        basic operations in a library called SEPIA. We evaluate the running time and
        bandwidth requirements of our protocols in realistic settings on a local cluster
        as well as on PlanetLab and show that they work in near real-time for up to 140
        input providers and 9 computation nodes. Compared to implementations using
        existing general-purpose MPC frameworks, our protocols are significantly faster,
        requiring, for example, 3 minutes for a task that takes 2 days with
        general-purpose frameworks. This improvement paves the way for new applications
        of MPC in the area of networking. Finally, we run SEPIA's protocols on real
        traffic traces of 17 networks and show how they provide new possibilities for
        distributed troubleshooting and early anomaly detection},
  www_section = {privacy, secure multi-party computation, SMC},
  url = {http://dl.acm.org/citation.cfm?id=1929820.1929840},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/USENIX\%20Security\%2710\%20-\%20SEPIA.pdf},
}
Bustamante04wayback:a
@conference{Bustamante04wayback:a,
  title = {Wayback: A User-level Versioning File System for Linux},
  author = {Fabian Bustamante and Brian Cornell and Peter Dinda},
  booktitle = {Proceedings of USENIX 2004 (Freenix Track)},
  year = {2004},
  abstract = {In a typical file system, only the current version of a file (or directory)
        is available. In Wayback, a user can also access any previous version, all the
        way back to the file's creation time. Versioning is done automatically at the
        write level: each write to the file creates a new version. Wayback implements
        versioning using an undo log structure, exploiting the massive space available on
        modern disks to provide its very useful functionality. Wayback is a user-level
        file system built on the FUSE framework that relies on an underlying file system
        for access to the disk. In addition to simplifying Wayback, this also allows it
        to extend any existing file system with versioning: after being mounted, the file
        system can be mounted a second time with versioning. We describe the
        implementation of Wayback, and evaluate its performance using several
        benchmarks},
  www_section = {file systems, version control},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.2672},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.11.2672.pdf},
}
Byers02simpleload
@conference{Byers02simpleload,
  title = {Simple Load Balancing for Distributed Hash Tables},
  author = {Byers, John W. and Jeffrey Considine and Michael Mitzenmacher},
  booktitle = {Peer-to-Peer Systems II: Second International Workshop on Peer-to-Peer
        Systems (IPTPS 2003)},
  year = {2003},
  pages = {80--87},
  abstract = {Distributed hash tables have recently become a useful building block for a
        variety of distributed applications. However, current schemes based upon
        consistent hashing require both considerable implementation complexity and
        substantial storage overhead to achieve desired load balancing goals. We argue in
        this paper that these goals can be achieved more simply and more
        cost-effectively. First, we suggest the direct application of the power of two
        choices paradigm, whereby an item is stored at the less loaded of two (or more)
        random alternatives. We then consider how associating a small constant number of
        hash values with a key can naturally be extended to support other load balancing
        strategies, including load-stealing or load-shedding, as well as providing
        natural fault-tolerance mechanisms},
  www_section = {distributed hash table, load balancing},
  doi = {10.1007/b11823},
  url = {http://www.springerlink.com/content/r9r4qcqxc2bmfqmr/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.277.pdf},
}
CADET
@conference{CADET,
  title = {CADET: Confidential Ad-hoc Decentralized End-to-End Transport},
  author = {Polot, Bartlomiej and Christian Grothoff},
  booktitle = {Med-Hoc-Net 2014},
  year = {2014},
  month = jan,
  abstract = {This paper describes CADET, a new transport protocol for confidential and
        authenticated data transfer in decentralized networks. This transport protocol is
        designed to operate in restricted-route scenarios such as friend-to-friend or
        ad-hoc wireless networks. We have implemented CADET and evaluated its performance
        in various network scenarios, compared it to the well-known TCP/IP stack and
        tested its response to rapidly changing network topologies. While our current
        implementation is still significantly slower in high-speed low-latency networks,
        for typical Internet-usage our system provides much better connectivity and
        security with comparable performance to TCP/IP},
  www_section = {CADET, encryption, GNUnet, routing},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cadet.pdf},
  www_tags = {selected},
  url = {https://bibliography.gnunet.org},
}
CANS2014camera-ready
@conference{CANS2014camera-ready,
  title        = {A Censorship-Resistant, Privacy-Enhancing and Fully Decentralized Name System},
  author       = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff},
  booktitle    = {International Conference on Cryptology and Network Security (CANS)},
  organization = {Springer Verlag},
  year         = {2014},
  publisher    = {Springer Verlag},
  abstract     = {The Domain Name System (DNS) is vital for access to information on the
        Internet. This makes it a target for attackers whose aim is to suppress free
        access to information. This paper introduces the design and implementation of the
        GNU Name System (GNS), a fully decentralized and censorship-resistant name
        system. GNS provides a privacy-enhancing alternative to DNS which preserves the
        desirable property of memorable names. Due to its design, it can also double as a
        partial replacement of public key infrastructures, such as X.509. The design of
        GNS incorporates the capability to integrate and coexist with DNS. GNS is based
        on the principle of a petname system and builds on ideas from the Simple
        Distributed Security Infrastructure (SDSI), addressing a central issue with the
        decentralized mapping of secure identifiers to memorable names: namely the
        impossibility of providing a global, secure and memorable mapping without a
        trusted authority. GNS uses the transitivity in the SDSI design to replace the
        trusted root with secure delegation of authority, thus making petnames useful to
        other users while operating under a very strong adversary model. In addition to
        describing the GNS design, we also discuss some of the mechanisms that are needed
        to smoothly integrate GNS with existing processes and procedures in Web browsers.
        Specifically, we show how GNS is able to transparently support many assumptions
        that the existing HTTP(S) infrastructure makes about globally unique names},
  www_section  = {DNS, GNU Name System, GNUnet, PKI},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/paper_cans2014_camera_ready.pdf},
  www_tags     = {selected},
  url          = {https://bibliography.gnunet.org},
}
CPIR
@conference{CPIR,
  title = {Computationally private information retrieval (extended abstract)},
  author = {Benny Chor and Niv Gilboa},
  booktitle = {Proceedings of the twenty-ninth annual ACM symposium on Theory of Computing
        (STOC '97)},
  organization = {ACM Press},
  year = {1997},
  address = {El Paso, TX, United States},
  pages = {304--313},
  publisher = {ACM Press},
  abstract = {Private information retrieval (PIR) schemes enable a user to access k
        replicated copies of a database (k 2), and privately retrieve one of the n bits
        of data stored in the databases. This means that the queries give each individual
        database no partial information (in the information theoretic sense) on the
        identity of the item retrieved by the user. Today, the best two database scheme
        (k = 2) has communication complexity O(n 1=3 ), while for any constant number, k,
        the best k database scheme has communication complexity O(n 1=(2k\Gamma1) ). The
        motivation for the present work is the question whether this complexity can be
        reduced if one is willing to achieve computational privacy, rather than
        information theoretic privacy. (This means that privacy is guaranteed only with
        respect to databases that are restricted to polynomial time computations.) We
        answer this question affirmatively, and Computer Science Dept., Technion, Haifa,
        Israel},
  www_section = {communication complexity, private information retrieval},
  isbn = {0-89791-888-6},
  doi = {10.1145/258533.258609},
  url = {http://portal.acm.org/citation.cfm?id=258533.258609},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chor97computationally.pdf},
}
Cabrera01herald:achieving
@conference{Cabrera01herald:achieving,
  title = {Herald: Achieving a Global Event Notification Service},
  author = {Luis Felipe Cabrera and Michael B. Jones and Marvin Theimer},
  booktitle = {In HotOS VIII},
  organization = {IEEE Computer Society},
  year = {2001},
  publisher = {IEEE Computer Society},
  abstract = {This paper presents the design philosophy and initial design decisions of
        Herald: a highly scalable global event notification system that is being designed
        and built at Microsoft Research. Herald is a distributed system designed to
        transparently scale in all respects, including numbers of subscribers and
        publishers, numbers of event subscription points, and event delivery rates. Event
        delivery can occur within a single machine, within a local network or Intranet,
        and throughout the Internet},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.672\&rep=rep1\&type=pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Cao:2002:IPG:508325.508330
@article{Cao:2002:IPG:508325.508330,
  title = {Internet pricing with a game theoretical approach: concepts and examples},
  author = {Cao, Xi-Ren and Shen, Hong-Xia and Milito, Rodolfo and Wirth, Patrica},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {10},
  year = {2002},
  month = apr,
  address = {Piscataway, NJ, USA},
  pages = {208--216},
  publisher = {IEEE Press},
  abstract = {The basic concepts of three branches of game theory, leader-follower,
        cooperative, and two-person nonzero sum games, are reviewed and applied to the
        study of the Internet pricing issue. In particular, we emphasize that the
        cooperative game (also called the bargaining problem) provides an overall picture
        for the issue. With a simple model for Internet quality of service (QoS), we
        demonstrate that the leader-follower game may lead to a solution that is not
        Pareto optimal and in some cases may be "unfair," and that the cooperative game
        may provide a better solution for both the Internet service provider (ISP) and
        the user. The practical implication of the results is that government regulation
        or arbitration may be helpful. The QoS model is also applied to study the
        competition between two ISPs, and we find a Nash equilibrium point from which the
        two ISPs would not move out without cooperation. The proposed approaches can be
        applied to other Internet pricing problems such as the Paris Metro pricing
        scheme},
  www_section = {bargaining problems, cooperative games, leader-follower games, Paris metro
        pricing, quality of services, two-person nonzero sum games},
  issn = {1063-6692},
  url = {http://dl.acm.org/citation.cfm?id=508325.508330},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Trans.\%20Netw.\%2702\%20\%2810\%29-\%20Internet\%20pricing.pdf},
}
Capkun02smallworlds
@conference{Capkun02smallworlds,
  title        = {Small Worlds in Security Systems: an Analysis of the PGP Certificate Graph},
  author       = {Srdan Capkun and Levente Butty{\'a}n and Jean-Pierre Hubaux},
  booktitle    = {In Proceedings of The ACM New Security Paradigms Workshop},
  organization = {ACM Press},
  year         = {2002},
  pages        = {28--35},
  publisher    = {ACM Press},
  abstract     = {We propose a new approach to securing self-organized mobile ad hoc networks.
        In this approach, security is achieved in a fully self-organized manner; by this
        we mean that the security system does not require any kind of certification
        authority or centralized server, even for the initialization phase. In our work,
        we were inspired by PGP [15] because its operation relies solely on the
        acquaintances between users. We show that the small-world phenomenon naturally
        emerges in the PGP system as a consequence of the self-organization of users. We
        show this by studying the PGP certificate graph properties and by quantifying its
        small-world characteristics. We argue that the certificate graphs of
        self-organized security systems will exhibit a similar small-world phenomenon,
        and we provide a way to model self-organized certificate graphs. The results of
        the PGP certificate graph analysis and graph modelling can be used to build new
        self-organized security systems and to test the performance of the existing
        proposals. In this work, we refer to such an example},
  www_section  = {PGP, public key management, self-organization, small-world},
  isbn         = {1-58113-598-X},
  doi          = {10.1145/844102.844108},
  url          = {http://portal.acm.org/citation.cfm?id=844102.844108},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.5408.pdf},
}
Castro02exploitingnetwork
@booklet{Castro02exploitingnetwork,
  title = {Exploiting network proximity in peer-to-peer overlay networks},
  author = {Miguel Castro and Peter Druschel and Y. Charlie Hu and Antony Rowstron},
  year = {2002},
  abstract = {The authors give an overview over various ways to use proximity information
        to optimize routing in peer-to-peer networks. Their study focuses on Pastry and
        describe in detail the protocols that are used in Pastry to build routing tables
        with neighbours that are close in terms of the underlying network. They give some
        analytical and extensive experimental evidence that the protocols are effective
        in reducing the length of the routing-path in terms of the link-to-link latency
        that their implementation uses to measure distance},
  url = {http://www.research.microsoft.com/~antr/PAST/location.ps},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/location.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Castro02exploitingnetwork_0
@conference{Castro02exploitingnetwork_0,
  title = {Exploiting network proximity in distributed hash tables},
  author = {Miguel Castro and Peter Druschel and Y. Charlie Hu},
  booktitle = {International Workshop on Future Directions in Distributed Computing
        (FuDiCo)},
  year = {2002},
  pages = {52--55},
  abstract = {Self-organizing peer-to-peer (p2p) overlay networks like CAN, Chord, Pastry
        and Tapestry (also called distributed hash tables or DHTs) offer a novel platform
        for a variety of scalable and decentralized distributed applications. These
        systems provide efficient and fault-tolerant routing, object location, and load
        balancing within a self-organizing overlay network. One important aspect of these
        systems is how they exploit network proximity in the underlying Internet. Three
        basic approaches have been proposed to exploit network proximity in DHTs,
        geographic layout, proximity routing and proximity neighbour selection. In this
        position paper, we briefly discuss the three approaches, contrast their strengths
        and shortcomings, and consider their applicability in the different DHT routing
        protocols. We conclude that proximity neighbor selection, when used in DHTs with
        prefixbased routing like Pastry and Tapestry, is highly effective and appears to
        dominate the other approaches},
  www_section = {CAN, distributed hash table, P2P},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.126.3062},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fudico.pdf},
}
Castro02scribe:a
@article{Castro02scribe:a,
  title = {SCRIBE: A large-scale and decentralized application-level multicast
        infrastructure},
  author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Antony Rowstron},
  journal = {IEEE Journal on Selected Areas in Communications (JSAC)},
  volume = {20},
  number = {8},
  year = {2002},
  pages = {1489--1499},
  abstract = {This paper presents Scribe, a scalable application-level multicast
        infrastructure. Scribe supports large numbers of groups, with a potentially large
        number of members per group. Scribe is built on top of Pastry, a generic
        peer-to-peer object location and routing substrate overlayed on the Internet, and
        leverages Pastry's reliability, self-organization, and locality properties.
        Pastry is used to create and manage groups and to build efficient multicast trees
        for the dissemination of messages to each group. Scribe provides best-effort
        reliability guarantees, but we outline how an application can extend Scribe to
        provide stronger reliability. Simulation results, based on a realistic network
        topology model, show that Scribe scales across a wide range of groups and group
        sizes. Also, it balances the load on the nodes while achieving acceptable delay
        and link stress when compared to IP multicast},
  www_section = {distributed hash table, multicast, Scribe},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.299\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/jsac.pdf},
}
Castro:2003:SHM:1165389.945474
@article{Castro:2003:SHM:1165389.945474,
  title = {SplitStream: high-bandwidth multicast in cooperative environments},
  author = {Miguel Castro and Peter Druschel and Anne-Marie Kermarrec and Nandi, Animesh
        and Antony Rowstron and Singh, Atul},
  journal = {SIGOPS'03 Operating Systems Review},
  volume = {37},
  year = {2003},
  month = oct,
  address = {New York, NY, USA},
  pages = {298--313},
  publisher = {ACM},
  abstract = {In tree-based multicast systems, a relatively small number of interior nodes
        carry the load of forwarding multicast messages. This works well when the
        interior nodes are highly-available, dedicated infrastructure routers but it
        poses a problem for application-level multicast in peer-to-peer systems.
        SplitStream addresses this problem by striping the content across a forest of
        interior-node-disjoint multicast trees that distributes the forwarding load among
        all participating peers. For example, it is possible to construct efficient
        SplitStream forests in which each peer contributes only as much forwarding
        bandwidth as it receives. Furthermore, with appropriate content encodings,
        SplitStream is highly robust to failures because a node failure causes the loss
        of a single stripe on average. We present the design and implementation of
        SplitStream and show experimental results obtained on an Internet testbed and via
        large-scale network simulation. The results show that SplitStream distributes the
        forwarding load among all peers and can accommodate peers with different
        bandwidth capacities while imposing low overhead for forest construction and
        maintenance},
  www_section = {application-level multicast, content distribution, end-system multicast,
        peer-to-peer networking, video streaming},
  issn = {0163-5980},
  doi = {10.1145/1165389.945474},
  url = {http://doi.acm.org/10.1145/1165389.945474},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGOSP\%2703\%20-\%20Spitstream\%3A\%20High-bandwidth\%20multicast.pdf},
}
Cattaneo:2001:DIT:647054.715628
@conference{Cattaneo:2001:DIT:647054.715628,
  title = {The Design and Implementation of a Transparent Cryptographic File System for
        UNIX},
  author = {Cattaneo, Giuseppe and Catuogno, Luigi and Sorbo, Aniello Del and Persiano,
        Pino},
  booktitle = {Proceedings of the FREENIX Track: 2001 USENIX Annual Technical Conference},
  organization = {USENIX Association},
  year = {2001},
  month = jun,
  address = {Boston, Massachusetts, USA},
  pages = {199--212},
  publisher = {USENIX Association},
  abstract = {Recent advances in hardware and communication technologies have made possible
        and cost effective to share a file system among several machines over a local (but
        possibly also a wide) area network. One of the most successful and widely used
        such applications is Sun's Network File System (NFS). NFS is very simple in
        structure but assumes a very strong trust model: the user trusts the remote file
        system server (which might be running on a machine in different country) and a
        network with his/her data. It is easy to see that neither assumption is a very
        realistic one. The server (or anybody with superuser privileges) might very well
        read the data on its local filesystem and it is well known that the Internet or any
        local area network (e.g, Ethernet) is very easy to tap (see for example,
        Berkeley's tcpdump [7, 5] application program). Impersonification of users is also
        another security drawback of NFS. In fact, most of the permission checking over
        NFS are performed in the kernel of the client. In such a context a pirate can
        temporarily assign to his own workstation the Internet address of victim. Without
        secure RPC [9] no further authentication procedure is requested. From here on, the
        pirate can issue NFS requests presenting himself with any (false) uid and
        therefore accessing for reading and writing any private data on the server, even
        protected data. Given the above, a user seeking a certain level of security
        should take some measures. Possible solutions are to use either user-level
        cryptography or application level cryptography. A discussion of the drawbacks of
        these approaches is found in [4]. A better approach is to push encryption services
        into the operating system as done by M. Blaze in the design of his CFS [4]. In
        this paper, we propose a new cryptographic file system, which we call TCFS, as a
        suitable solution to the problem of privacy for distributed file system (see
        section 2.1). Our work improves on CFS by providing a deeper integration between
        the encryption service and the file system which results in a complete transparency
        of use to the user applications},
  www_section = {cryptographic file system, UNIX},
  isbn = {1-880446-10-3},
  url = {http://dl.acm.org/citation.cfm?id=647054.715628},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FREENIX\%2701\%20-\%20A\%20transparent\%20cryptographic\%20file\%20system\%20for\%20UNIX.pdf},
}
ChatziPP07
@conference{ChatziPP07,
  title       = {Probability of Error in Information-Hiding Protocols},
  author      = {Konstantinos Chatzikokolakis and Catuscia Palamidessi and Prakash Panangaden},
  booktitle   = {Proceedings of the 20th IEEE Computer Security Foundations Symposium
        (CSF20)},
  year        = {2007},
  abstract    = {Randomized protocols for hiding private information can fruitfully be
        regarded as noisy channels in the information-theoretic sense, and the inference
        of the concealed information can be regarded as a hypothesis-testing problem. We
        consider the Bayesian approach to the problem, and investigate the probability of
        error associated to the inference when the MAP (Maximum Aposteriori Probability)
        decision rule is adopted. Our main result is a constructive characterization of a
        convex base of the probability of error, which allows us to compute its maximum
        value (over all possible input distributions), and to identify upper bounds for
        it in terms of simple functions. As a side result, we are able to improve
        substantially the Hellman-Raviv and the Santhi-Vardy bounds expressed in terms of
        conditional entropy. We then discuss an application of our methodology to the
        Crowds protocol, and in particular we show how to compute the bounds on the
        probability that an adversary breaks anonymity},
  www_section = {anonymity, privacy},
  isbn        = {0-7695-2819-8},
  doi         = {10.1109/CSF.2007.27},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.79.2620},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ChatziPP07.pdf},
}
Chen01poblano:a
@booklet{Chen01poblano:a,
  title = {Poblano: A distributed trust model for peer-to-peer networks},
  author = {Rita Chen and William Yeager},
  year = {2001},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.7489\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.7489.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Chen:2008:IRS:1331483.1331515
@article{Chen:2008:IRS:1331483.1331515,
  title = {Insight into redundancy schemes in DHTs},
  author = {Chen, Guihai and Qiu, Tongqing and Wu, Fan},
  journal = {Journal of Supercomputing},
  volume = {43},
  year = {2008},
  month = feb,
  address = {Hingham, MA, USA},
  pages = {183--198},
  publisher = {Kluwer Academic Publishers},
  abstract = {In order to provide high data availability in peer-to-peer (P2P) DHTs, proper
        data redundancy schemes are required. This paper compares two popular schemes:
        replication and erasure coding. Unlike previous comparison, we take user download
        behavior into account. Furthermore, we propose a hybrid redundancy scheme, which
        shares user downloaded files for subsequent accesses and utilizes erasure coding
        to adjust file availability. Comparison experiments of three schemes show that
        replication saves more bandwidth than erasure coding, although it requires more
        storage space, when average node availability is higher than 47\%; moreover, our
        hybrid scheme saves more maintenance bandwidth with acceptable redundancy
        factor},
  www_section = {distributed hash table, erasure coding, peer-to-peer networking,
        redundancy, Replication},
  issn = {0920-8542},
  doi = {10.1007/s11227-007-0126-4},
  url = {http://dl.acm.org/citation.cfm?id=1331483.1331515},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20Supercomputing\%20-\%20Insight\%20into\%20redundancy\%20schemes\%20in\%20DHTs.pdf},
}
Cheng:2005:SRM:1080192.1080202
@conference{Cheng:2005:SRM:1080192.1080202,
  title = {Sybilproof reputation mechanisms},
  author = {Cheng, Alice and Eric Friedman},
  booktitle = {Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of Peer-to-Peer
        Systems},
  organization = {ACM},
  year = {2005},
  month = aug,
  address = {Philadelphia, PA},
  pages = {128--132},
  publisher = {ACM},
  series = {P2PECON '05},
  abstract = {Due to the open, anonymous nature of many P2P networks, new identities--or
        sybils--may be created cheaply and in large numbers. Given a reputation system, a
        peer may attempt to falsely raise its reputation by creating fake links between
        its sybils. Many existing reputation mechanisms are not resistant to these types
        of strategies.Using a static graph formulation of reputation, we attempt to
        formalize the notion of sybilproofness. We show that there is no symmetric
        sybilproof reputation function. For nonsymmetric reputations, following the
        notion of reputation propagation along paths, we give a general asymmetric
        reputation function based on flow and give conditions for sybilproofness},
  www_section = {peer-to-peer networking, reputation, Sybil attack},
  isbn = {1-59593-026-4},
  doi = {10.1145/1080192.1080202},
  url = {http://doi.acm.org/10.1145/1080192.1080202},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACM\%20SIGCOMM\%2705\%20-\%20Cheng\%20\%26\%20Friedman\%20-\%20Sybilproof\%20reputation\%20mechanisms.pdf},
}
Cholez:2009:ESA:1574663.1574671
@conference{Cholez:2009:ESA:1574663.1574671,
  title = {Evaluation of Sybil Attacks Protection Schemes in KAD},
  author = {Cholez, Thibault and Chrisment, Isabelle and Festor, Olivier},
  booktitle = {AIMS'09--Proceedings of the 3rd International Conference on Autonomous
        Infrastructure, Management and Security: Scalability of Networks and Services},
  organization = {Springer-Verlag},
  volume = {5637},
  year = {2009},
  month = jun,
  address = {Enschede, The Netherlands},
  pages = {70--82},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  abstract = {In this paper, we assess the protection mechanisms entered into recent
        clients to fight against the Sybil attack in KAD, a widely deployed Distributed
        Hash Table. We study three main mechanisms: a protection against flooding through
        packet tracking, an IP address limitation and a verification of identities. We
        evaluate their efficiency by designing and adapting an attack for several KAD
        clients with different levels of protection. Our results show that the new
        security rules mitigate the Sybil attacks previously launched. However, we prove
        that it is still possible to control a small part of the network despite the new
        inserted defenses with a distributed eclipse attack and limited resources},
  www_section = {defense, distributed hash table, KAD, p2p network, security, Sybil
        attack},
  isbn = {978-3-642-02626-3},
  doi = {10.1007/978-3-642-02627-0_6},
  url = {http://dx.doi.org/10.1007/978-3-642-02627-0_6},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AIMS\%2709\%20-\%20Sybil\%20attacks\%20protection\%20schemes\%20in\%20KAD.pdf},
}
Clarke00freenet:a
@conference{Clarke00freenet:a,
  title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, 
  author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}, 
  booktitle = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, 
  year = {2000}, 
  pages = {46--66}, 
  abstract = {We describe Freenet, an adaptive peer-to-peer network application that
        permits the publication, replication, and retrieval of data while protecting the
        anonymity of both authors and readers. Freenet operates as a network of identical
        nodes that collectively pool their storage space to store data files and
        cooperate to route requests to the most likely physical location of data. No
        broadcast search or centralized location index is employed. Files are referred to
        in a location-independent manner, and are dynamically replicated in locations
        near requestors and deleted from locations where there is no interest. It is
        infeasible to discover the true origin or destination of a file passing through
        the network, and difficult for a node operator to determine or be held
        responsible for the actual physical contents of her own node}, 
  www_section = {Freenet, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.4919}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.4919.pdf}, 
}
Clarke00freenet:a_0
@conference{Clarke00freenet:a_0,
  title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, 
  author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}, 
  booktitle = {Designing Privacy Enhancing Technologies, International Workshop on Design
        Issues in Anonymity and Unobservability, Proceedings 2001}, 
  year = {2000}, 
  month = {July}, 
  address = {Berkeley, CA, USA}, 
  pages = {46--66}, 
  abstract = {We describe Freenet, an adaptive peer-to-peer network application that
        permits the publication, replication, and retrieval of data while protecting the
        anonymity of both authors and readers. Freenet operates as a network of identical
        nodes that collectively pool their storage space to store data files and
        cooperate to route requests to the most likely physical location of data. No
        broadcast search or centralized location index is employed. Files are referred to
        in a location-independent manner, and are dynamically replicated in locations
        near requestors and deleted from locations where there is no interest. It is
        infeasible to discover the true origin or destination of a file passing through
        the network, and difficult for a node operator to determine or be held
        responsible for the actual physical contents of her own node}, 
  www_section = {Freenet, P2P}, 
  url = {http://www.ecse.rpi.edu/Homepages/shivkuma/teaching/sp2001/readings/freenet.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freenet.pdf}, 
}
Clifton:2002:TPP:772862.772867
@article{Clifton:2002:TPP:772862.772867,
  author      = {Clifton, Chris and Kantarcioglu, Murat and Vaidya, Jaideep and Lin, Xiaodong
        and Zhu, Michael Y.},
  title       = {Tools for privacy preserving distributed data mining},
  journal     = {SIGKDD Explorations Newsletter},
  volume      = {4},
  number      = {2},
  pages       = {28--34},
  month       = {December},
  year        = {2002},
  publisher   = {ACM},
  address     = {New York, NY, USA},
  abstract    = {Privacy preserving mining of distributed data has numerous applications. Each
        application poses different constraints: What is meant by privacy, what are the
        desired results, how is the data distributed, what are the constraints on
        collaboration and cooperative computing, etc. We suggest that the solution to
        this is a toolkit of components that can be combined for specific
        privacy-preserving data mining applications. This paper presents some components
        of such a toolkit, and shows how they can be used to solve several
        privacy-preserving data mining problems},
  www_section = {PIR, privacy, security},
  issn        = {1931-0145},
  doi         = {10.1145/772862.772867},
  url         = {http://doi.acm.org/10.1145/772862.772867},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGKDD\%20Explor.\%20Newsl.\%20-\%20Distributed\%20data\%20mining.pdf},
}
CoNext2008
@conference{CoNext2008,
  author      = {Georgios Smaragdakis and Vassilis Lekakis and Nikolaos Laoutaris and Azer
        Bestavros and Byers, John W. and Mema Roussopoulos},
  title       = {EGOIST: Overlay Routing using Selfish Neighbor Selection},
  booktitle   = {Proceedings of ACM CoNEXT 2008},
  month       = {December},
  year        = {2008},
  address     = {Madrid, Spain},
  abstract    = {A foundational issue underlying many overlay network applications ranging
        from routing to peer-to-peer file sharing is that of connectivity management,
        i.e., folding new arrivals into an existing overlay, and re-wiring to cope with
        changing network conditions. Previous work has considered the problem from two
        perspectives: devising practical heuristics for specific applications designed to
        work well in real deployments, and providing abstractions for the underlying
        problem that are analytically tractable, especially via game-theoretic analysis.
        In this paper, we unify these two thrusts by using insights gleaned from novel,
        realistic theoretic models in the design of Egoist -- a distributed overlay
        routing system that we implemented, deployed, and evaluated on PlanetLab. Using
        extensive measurements of paths between nodes, we demonstrate that Egoist's
        neighbor selection primitives significantly outperform existing heuristics on a
        variety of performance metrics, including delay, available bandwidth, and node
        utilization. Moreover, we demonstrate that Egoist is competitive with an optimal,
        but unscalable full-mesh approach, remains highly effective under significant
        churn, is robust to cheating, and incurs minimal overhead. Finally, we use a
        multiplayer peer-to-peer game to demonstrate the value of Egoist to end-user
        applications},
  www_section = {EGOIST, game theory, overlay networks, routing, selfish neighbor
        selection},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoNEXT2008.pdf},
}
Cohen02replicationstrategies
@conference{Cohen02replicationstrategies,
  title = {Replication Strategies in Unstructured Peer-to-Peer Networks}, 
  author = {Edith Cohen and Scott Shenker}, 
  booktitle = {Proceedings of the 2002 SIGCOMM conference}, 
  organization = {ACM New York, NY, USA}, 
  volume = {32}, 
  number = {4}, 
  year = {2002}, 
  month = {October}, 
  address = {Pittsburgh}, 
  pages = {177--190}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {The Peer-to-Peer (P2P) architectures that are most prevalent in today's
        Internet are decentralized and unstructured. Search is blind in that it is
        independent of the query and is thus not more effective than probing randomly
        chosen peers. One technique to improve the effectiveness of blind search is to
        proactively replicate data}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9873\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/replication.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Conner:2007:SPM:1377934.1377937
@conference{Conner:2007:SPM:1377934.1377937,
  title = {Securing peer-to-peer media streaming systems from selfish and malicious
        behavior}, 
  author = {Conner, William and Nahrstedt, Klara}, 
  booktitle = {MDS'07. Proceedings of the 4th on Middleware Doctoral Symposium}, 
  organization = {ACM}, 
  volume = {13}, 
  year = {2007}, 
  month = {November}, 
  address = {Newport Beach, CA, USA}, 
  pages = {1--6}, 
  publisher = {ACM}, 
  series = {MDS '07}, 
  abstract = {We present a flexible framework for throttling attackers in peer-to-peer
        media streaming systems. In such systems, selfish nodes (e.g., free riders) and
        malicious nodes (e.g., DoS attackers) can overwhelm the system by issuing too
        many requests in a short interval of time. Since peer-to-peer systems are
        decentralized, it is difficult for individual peers to limit the aggregate
        download bandwidth consumed by other remote peers. This could potentially allow
        selfish and malicious peers to exhaust the system's available upload bandwidth.
        In this paper, we propose a framework to provide a solution to this problem by
        utilizing a subset of trusted peers (called kantoku nodes) that collectively
        monitor the bandwidth usage of untrusted peers in the system and throttle
        attackers. This framework has been evaluated through simulation thus far.
        Experiments with a full implementation on a network testbed are part of our
        future work}, 
  www_section = {accounting, multimedia, peer-to-peer networking, security}, 
  isbn = {978-1-59593-933-3}, 
  doi = {10.1145/1377934.1377937}, 
  url = {http://doi.acm.org/10.1145/1377934.1377937}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MDS\%2707\%20-\%20Conner\%20\%26\%20Nahrstedt\%20-\%20Securing\%20peer-to-peer\%20media\%20streaming\%20systems.pdf},
}
Conrad03multiplelanguage
@conference{Conrad03multiplelanguage,
  title = {Multiple language family support for programmable network systems}, 
  author = {Michael Conrad and Marcus Schoeller and Thomas Fuhrmann and Gerhard Bocksch and
        Martina Zitterbart}, 
  booktitle = {Proceedings of the 5th Annual International Working Conference on Active
        Networks (IWAN)}, 
  year = {2003}, 
  abstract = {Various programmable networks have been designed and implemented during the
        last couple of years. Many of them are focused on a single programming language
        only. This limitation might{\textemdash}to a certain extend{\textemdash}hinder
        the productivity of service modules being programmed for such networks.
        Therefore, the concurrent support of service modules written in multiple
        programming languages was investigated within the FlexiNet project. Basically,
        support for three major programming paradigms was incorporated into FlexiNet:
        compiled programming languages like C, interpreted languages (e.g., Java), and
        hardware description languages such as VHDL. The key concept can be seen in an
        integral interface that is used by all three programming languages. This leads to
        a configuration scheme which is totally transparent to the programming languages
        used to develop the service. In order to get a better idea about the impact of
        the programming language used, some measurement experiments were conducted}, 
  www_section = {flexible service platforms, programmable networks}, 
  isbn = {978-3-540-21250-8}, 
  doi = {10.1007/b96396}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.68.3301}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scholler03language.pdf},
}
Conrad04SecureServiceSignaling
@conference{Conrad04SecureServiceSignaling,
  author      = {Michael Conrad and Thomas Fuhrmann and Marcus Schoeller and Martina
        Zitterbart},
  title       = {Secure Service Signaling and fast Authorization in Programmable Networks},
  booktitle   = {Proceedings of the 6th International Working Conference on Active Networking
        (IWAN) 2004},
  organization = {Springer Berlin / Heidelberg},
  publisher   = {Springer Berlin / Heidelberg},
  year        = {2004},
  address     = {Lawrence, Kansas},
  type        = {publication},
  abstract    = {Programmable networks aim at the fast and flexible creation of services
        within a network. Often cited examples are audio and video transcoding,
        application layer multicast, or mobility and resilience support. In order to
        become commercially viable, programmable networks must provide authentication,
        authorization and accounting functionality. The mechanisms used to achieve these
        functionalities must be secure, reliable, and scalable, to be used in production
        scale programmable networks. Additionally programmable nodes must resist various
        kinds of attacks, such as denial of service or replay attacks. Fraudulent use by
        individual users must also be prohibited. This paper describes the design and
        implementation of a secure, reliable, and scalable signaling mechanism clients
        can use to initiate service startup and to manage services running on the nodes
        of a programmable network. This mechanism is designed for production scale
        networks with AAA-functionality},
  www_section = {programmable networks, secrecy},
  isbn        = {978-3-540-71499-6},
  doi         = {10.1007/978-3-540-71500-9},
  url         = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/iwan2004.pdf},
}
Cooley_abs:the
@booklet{Cooley_abs:the,
  author      = {Joe Cooley and Chris Taylor and Alen Peacock},
  title       = {ABS: The Apportioned Backup System},
  year        = {2004},
  abstract    = {Many personal computers are operated with no backup strategy for protecting
        data in the event of loss or failure. At the same time, PCs are likely to contain
        spare disk space and unused networking resources. We present the Apportioned
        Backup System (ABS), which provides a reliable collaborative backup resource by
        leveraging these independent, distributed resources. With ABS, procuring and
        maintaining specialized backup hardware is unnecessary. ABS makes efficient use
        of network and storage resources through use of coding techniques, convergent
        encryption and storage, and efficient versioning and verification processes. The
        system also painlessly accommodates dynamic expansion of system compute, storage,
        and network resources, and is tolerant of catastrophic node failures},
  www_section = {apportioned backup system},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.6858},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.120.6858.pdf},
}
Coulom:2006:ESB:1777826.1777833
@conference{Coulom:2006:ESB:1777826.1777833,
  author      = {Coulom, R{\'e}mi},
  title       = {Efficient selectivity and backup operators in Monte-Carlo tree search},
  booktitle   = {CG'06--Proceedings of the 5th international conference on Computers and
        games},
  series      = {CG'06},
  organization = {Springer-Verlag},
  publisher   = {Springer-Verlag},
  year        = {2007},
  address     = {Turin, Italy},
  pages       = {72--83},
  abstract    = {A Monte-Carlo evaluation consists in estimating a position by averaging the
        outcome of several random continuations. The method can serve as an evaluation
        function at the leaves of a min-max tree. This paper presents a new framework to
        combine tree search with Monte-Carlo evaluation, that does not separate between a
        min-max phase and a Monte-Carlo phase. Instead of backing-up the min-max value
        close to the root, and the average value at some depth, a more general backup
        operator is defined that progressively changes from averaging to minmax as the
        number of simulations grows. This approach provides a finegrained control of the
        tree growth, at the level of individual simulations, and allows efficient
        selectivity. The resulting algorithm was implemented in a 9 {\texttimes} 9
        Go-playing program, Crazy Stone, that won the 10th KGS computer-Go tournament},
  www_section = {framework, MCTS, Monte-Carlo Tree Search},
  isbn        = {3-540-75537-3, 978-3-540-75537-1},
  url         = {http://dl.acm.org/citation.cfm?id=1777826.1777833},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CG\%2706\%20-\%20Selectivity\%20and\%20backup\%20operators\%20in\%20MCTS.pdf},
}
Cox02pastiche:making
@booklet{Cox02pastiche:making,
  author      = {Landon P. Cox and Christopher D. Murray and Brian D. Noble},
  title       = {Pastiche: Making Backup Cheap and Easy},
  year        = {2002},
  abstract    = {Backup is cumbersome and expensive. Individual users almost never back up
        their data, and backup is a significant cost in large organizations. This paper
        presents Pastiche, a simple and inexpensive backup system. Pastiche exploits
        excess disk capacity to perform peer-to-peer backup with no administrative costs.
        Each node minimizes storage overhead by selecting peers that share a significant
        amount of data. It is easy for common installations to find suitable peers, and
        peers with high overlap can be identified with only hundreds of bytes. Pastiche
        provides mechanisms for confidentiality, integrity, and detection of failed or
        malicious peers. A Pastiche prototype suffers only 7.4\% overhead for a modified
        Andrew Benchmark, and restore performance is comparable to cross-machine copy},
  www_section = {backup, P2P},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.15.3254},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.15.3254.pdf},
}
Cox:2004:PDN:972374.972394
@article{Cox:2004:PDN:972374.972394,
  title = {Practical, distributed network coordinates}, 
  author = {Russ Cox and Dabek, Frank and Frans M. Kaashoek and Li, Jinyang and Robert
        Morris}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {34}, 
  year = {2004}, 
  month = {January}, 
  address = {New York, NY, USA}, 
  pages = {113--118}, 
  publisher = {ACM}, 
  abstract = {Vivaldi is a distributed algorithm that assigns synthetic coordinates to
        internet hosts, so that the Euclidean distance between two hosts' coordinates
        predicts the network latency between them. Each node in Vivaldi computes its
        coordinates by simulating its position in a network of physical springs. Vivaldi
        is both distributed and efficient: no fixed infrastructure need be deployed and a
        new host can compute useful coordinates after collecting latency information from
        only a few other hosts. Vivaldi can rely on piggy-backing latency information on
        application traffic instead of generating extra traffic by sending its own probe
        packets.This paper evaluates Vivaldi through simulations of 750 hosts, with a
        matrix of inter-host latencies derived from measurements between 750 real
        Internet hosts. Vivaldi finds synthetic coordinates that predict the measured
        latencies with a median relative error of 14 percent. The simulations show that a
        new host joining an existing Vivaldi system requires fewer than 10 probes to
        achieve this accuracy. Vivaldi is currently used by the Chord distributed hash
        table to perform proximity routing, replica selection, and retransmission timer
        estimation}, 
  www_section = {network coordinates, proximity routing, replica selection, retransmission
        timer estimation, Vivaldi}, 
  issn = {0146-4833}, 
  doi = {10.1145/972374.972394}, 
  url = {http://doi.acm.org/10.1145/972374.972394}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Practical\%2C\%20distributed\%20network\%20coordinates.pdf},
}
Cramer04Bootstrapping
@conference{Cramer04Bootstrapping,
  author      = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann},
  title       = {Bootstrapping Locality-Aware P2P Networks},
  booktitle   = {Proceedings of the IEEE International Conference on Networks (ICON 2004)},
  volume      = {1},
  year        = {2004},
  address     = {Singapore},
  pages       = {357--361},
  type        = {publication},
  abstract    = {Bootstrapping is a vital core functionality required by every peer-to-peer
        (P2P) overlay network. Nodes intending to participate in such an overlay network
        initially have to find at least one node that is already part of this network.
        While structured P2P networks (e.g. distributed hash tables, DHTs) define rules
        about how to proceed after this point, unstructured P2P networks continue using
        bootstrapping techniques until they are sufficiently connected. In this paper, we
        compare solutions applicable to the bootstrapping problem. Measurements of an
        existing system, the Gnutella web caches, highlight the inefficiency of this
        particular approach. Improved bootstrapping mechanisms could also incorporate
        locality-awareness into the process. We propose an advanced mechanism by which
        the overlay topology is--to some extent--matched with the underlying topology.
        Thereby, the performance of the overall system can be vastly improved},
  www_section = {bootstrapping, distributed hash table, P2P},
  isbn        = {0-7803-8783-X},
  doi         = {10.1109/ICON.2004.1409169},
  url         = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04bootstrapping.pdf},
}
Cramer04DemandDrivenClustering
@conference{Cramer04DemandDrivenClustering,
  title = {Demand-Driven Clustering in MANETs}, 
  author = {Cramer, Curt and Oliver Stanze and Kilian Weniger and Martina Zitterbart}, 
  booktitle = {Proceedings of the 2004 International Conference on Wireless Networks (ICWN
        '04)}, 
  volume = {1}, 
  year = {2004}, 
  address = {Las Vegas, NV}, 
  pages = {81--87}, 
  type = {publication}, 
  abstract = {Many clustering protocols for mobile ad hoc networks (MANETs) have been
        proposed in the literature. With only one exception so far [1], all these
        protocols are proactive, thus wasting bandwidth when their function is not
        currently needed. To reduce the signalling traffic load, reactive clustering may
        be employed. We have developed a clustering protocol named
        {\textquotedblleft}On-Demand Group Mobility-Based Clustering {\textquotedblright}
        (ODGMBC) which is reactive. Its goal is to build clusters as a basis for address
        autoconfiguration and hierarchical routing. The design process especially
        addresses the notion of group mobility in a MANET. As a result, ODGMBC maps
        varying physical node groups onto logical clusters. In this paper, ODGMBC is
        described. It was implemented for the ad hoc network simulator GloMoSim [2] and
        evaluated using several performance indicators. Simulation results are promising
        and show that ODGMBC leads to stable clusters. This stability is advantageous for
        autoconfiguration and routing mechanisms to be employed in conjunction with the
        clustering algorithm. Index Terms {\textemdash} clustering, multi-hop, reactive,
        MANET, group mobility}, 
  www_section = {mobile Ad-hoc networks, multi-hop networks}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04odgmbc.pdf}, 
}
Cramer04LifeScience
@conference{Cramer04LifeScience,
  author      = {Cramer, Curt and Andrea Schafferhans and Thomas Fuhrmann},
  title       = {Peer-to-Peer Overlays and Data Integration in a Life Science Grid},
  booktitle   = {Proceedings of the First International Workshop of the EU Network of
        Excellence DELOS on Digital Library Architectures},
  year        = {2004},
  address     = {Cagliari, Italy},
  pages       = {127--138},
  type        = {publication},
  abstract    = {Databases and Grid computing are a good match. With the service orientation
        of Grid computing, the complexity of maintaining and integrating databases can be
        kept away from the actual users. Data access and integration is performed via
        services, which also allow to employ an access control. While it is our
        perception that many proposed Grid applications rely on a centralized and static
        infrastructure, Peer-to-Peer (P2P) technologies might help to dynamically scale
        and enhance Grid applications. The focus does not lie on publicly available P2P
        networks here, but on the self-organizing capabilities of P2P networks in
        general. A P2P overlay could, e.g., be used to improve the distribution of
        queries in a data Grid. For studying the combination of these three technologies,
        Grid computing, databases, and P2P, in this paper, we use an existing application
        from the life sciences, drug target validation, as an example. In its current
        form, this system has several drawbacks. We believe that they can be alleviated
        by using a combination of the service-based architecture of Grid computing and
        P2P technologies for implementing the services. The work presented in this paper
        is in progress. We mainly focus on the description of the current system state,
        its problems and the proposed new architecture. For a better understanding, we
        also outline the main topics related to the work presented here},
  www_section = {GRID, overlay networks, P2P},
  url         = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04lifescience.pdf},
}
Cramer04Scheduling
@conference{Cramer04Scheduling,
  author      = {Cramer, Curt and Kendy Kutzner and Thomas Fuhrmann},
  title       = {Distributed Job Scheduling in a Peer-to-Peer Video Recording System},
  booktitle   = {Proceedings of the Workshop on Algorithms and Protocols for Efficient
        Peer-to-Peer Applications (PEPPA) at Informatik 2004},
  year        = {2004},
  address     = {Ulm, Germany},
  pages       = {234--238},
  type        = {publication},
  abstract    = {Since the advent of Gnutella, Peer-to-Peer (P2P) protocols have matured
        towards a fundamental design element for large-scale, self-organising distributed
        systems. Many research efforts have been invested to improve various aspects of
        P2P systems, like their performance, scalability, and so on. However, little
        experience has been gathered from the actual deployment of such P2P systems apart
        from the typical file sharing applications. To bridge this gap and to gain more
        experience in making the transition from theory to practice, we started building
        advanced P2P applications whose explicit goal is {\textquotedblleft}to be
        deployed in the wild{\textquotedblright}. In this paper, we describe a fully
        decentralised P2P video recording system. Every node in the system is a networked
        computer (desktop PC or set-top box) capable of receiving and recording DVB-S,
        i.e. digital satellite TV. Like a normal video recorder, users can program their
        machines to record certain programmes. With our system, they will be able to
        schedule multiple recordings in parallel. It is the task of the system to assign
        the recordings to different machines in the network. Moreover, users can
        {\textquotedblleft}record broadcasts in the past{\textquotedblright}, i.e. the
        system serves as a short-term archival storage},
  www_section = {DVB, P2P},
  url         = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer04scheduling.pdf},
}
Cuenca-Acuna03planetp:using
@conference{Cuenca-Acuna03planetp:using,
  author      = {Francisco Matias Cuenca-Acuna and Christopher Peery and Richard P. Martin and
        Thu D. Nguyen},
  title       = {PlanetP: Using Gossiping to Build Content Addressable Peer-to-Peer Information
        Sharing Communities},
  booktitle   = {12th IEEE International Symposium on High Performance Distributed Computing
        (HPDC-12 '03),},
  organization = {IEEE Press},
  publisher   = {IEEE Press},
  year        = {2003},
  address     = {Seattle, Washington},
  abstract    = {PlanetP is a peer-to-peer system in which searching content is done mostly
        locally. Every peer knows which content is available at which other peers. The
        index information is represented compactly using bloom filters and distributed
        throughout the network using push and pull mechanisms},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.14.6056\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.dvi_.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
DASEIN
@techreport{DASEIN,
  title = {Decentralized Authentication for Self-Sovereign Identities using Name Systems}, 
  author = {Christian Grothoff and Martin Schanzenbach and Annett Laube and Emmanuel
        Benoist}, 
  institution = {Berner Fachhochschule}, 
  number = {847382}, 
  year = {2018}, 
  month = {October}, 
  address = {Bern}, 
  type = {H2020 submission}, 
  abstract = {The GNU Name System (GNS) is a fully decentralized public key infrastructure
        and name system with private information retrieval semantics. It serves a
        holistic approach to interact seamlessly with IoT ecosystems and enables people
        and their smart objects to prove their identity, membership and privileges -
        compatible with existing technologies. In this report we demonstrate how a wide
        range of private authentication and identity management scenarios are addressed
        by GNS in a cost-efficient, usable and secure manner. This simple, secure and
        privacy-friendly authentication method is a significant breakthrough when cyber
        peace, privacy and liability are the priorities for the benefit of a wide range
        of the population. After an introduction to GNS itself, we show how GNS can be
        used to authenticate servers, replacing the Domain Name System (DNS) and X.509
        certificate authorities (CAs) with a more privacy-friendly but equally usable
        protocol which is trustworthy, human-centric and includes group authentication.
        We also built a demonstrator to highlight how GNS can be used in medical
        computing to simplify privacy-sensitive data processing in the Swiss health-care
        system. Combining GNS with attribute-based encryption, we created ReclaimID, a
        robust and reliable OpenID Connect-compatible authorization system. It includes
        simple, secure and privacy-friendly single sign-on to seamlessly share selected
        attributes with Web services, cloud ecosystems. Further, we demonstrate how
        ReclaimID can be used to solve the problem of addressing, authentication and data
        sharing for IoT devices. These applications are just the beginning for GNS; the
        versatility and extensibility of the protocol will lend itself to an even broader
        range of use-cases. GNS is an open standard with a complete free software
        reference implementation created by the GNU project. It can therefore be easily
        audited, adapted, enhanced, tailored, developed and/or integrated, as anyone is
        allowed to use the core protocols and implementations free of charge, and to
        adopt them to their needs under the terms of the GNU Affero General Public
        License, a free software license approved by the Free Software Foundation.}, 
  keywords = {DNS, GNU Name System, GNUnet, privacy, ReclaimID}, 
  www_section = {DNS, GNU Name System, GNUnet, privacy, ReclaimID}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dasein10.pdf}, 
}
DBLP:conf/ccs/EdmanS09
@conference{DBLP:conf/ccs/EdmanS09,
  title = {AS-awareness in Tor path selection}, 
  author = {Matthew Edman and Paul Syverson}, 
  booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications
        Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, 
  organization = {ACM}, 
  year = {2009}, 
  pages = {380--389}, 
  editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, 
  publisher = {ACM}, 
  abstract = {Tor is an anonymous communications network with thousands of router nodes
        worldwide. An intuition reflected in much of the literature on anonymous
        communications is that, as an anonymity network grows, it becomes more secure
        against a given observer because the observer will see less of the network. In
        particular, as the Tor network grows from volunteers operating relays all over
        the world, it becomes less and less likely for a single autonomous system (AS) to
        be able to observe both ends of an anonymous connection. Yet, as the network
        continues to grow significantly, no analysis has been done to determine if this
        intuition is correct. Further, modifications to Tor's path selection algorithm to
        help clients avoid an AS-level observer have not been proposed and analyzed. Five
        years ago a previous study examined the AS-level threat against client and
        destination addresses chosen a priori to be likely or interesting to examine.
        Using an AS-level path inference algorithm with improved accuracy, more extensive
        Internet routing data, and, most importantly, a model of typical Tor client
        AS-level sources and destinations based on data gathered from the live network,
        we demonstrate that the threat of a single AS observing both ends of an anonymous
        Tor connection is greater than previously thought. We look at the growth of the
        Tor network over the past five years and show that its explosive growth has had
        only a small impact on the network's robustness against an AS-level attacker.
        Finally, we propose and evaluate the effectiveness of some simple, AS-aware path
        selection algorithms that avoid the computational overhead imposed by full
        AS-level path inference algorithms. Our results indicate that a novel heuristic
        we propose is more effective against an AS-level observer than other commonly
        proposed heuristics for improving location diversity in path selection}, 
  www_section = {anonymity, autonomous systems, privacy, Tor}, 
  isbn = {978-1-60558-894-0}, 
  doi = {10.1145/1653662.1653708}, 
  url = {http://portal.acm.org/citation.cfm?id=1653662.1653708}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanS09.pdf}, 
}
DBLP:conf/ccs/TroncosoD09
@conference{DBLP:conf/ccs/TroncosoD09,
  title = {The bayesian traffic analysis of mix networks}, 
  author = {Carmela Troncoso and George Danezis}, 
  booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications
        Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, 
  organization = {ACM}, 
  year = {2009}, 
  pages = {369--379}, 
  editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, 
  publisher = {ACM}, 
  abstract = {This work casts the traffic analysis of anonymity systems, and in particular
        mix networks, in the context of Bayesian inference. A generative probabilistic
        model of mix network architectures is presented, that incorporates a number of
        attack techniques in the traffic analysis literature. We use the model to build
        an Markov Chain Monte Carlo inference engine, that calculates the probabilities
        of who is talking to whom given an observation of network traces. We provide a
        thorough evaluation of its correctness and performance, and confirm that mix
        networks with realistic parameters are secure. This approach enables us to apply
        established information theoretic anonymity metrics on complex mix networks, and
        extract information from anonymised traffic traces optimally}, 
  www_section = {anonymity, Markov chain, traffic analysis}, 
  isbn = {978-1-60558-894-0}, 
  doi = {10.1145/1653662.1653707}, 
  url = {http://portal.acm.org/citation.cfm?id=1653662.1653707}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TroncosoD09.pdf}, 
}
DBLP:conf/ccs/VassermanJTHK09
@conference{DBLP:conf/ccs/VassermanJTHK09,
  title = {Membership-concealing overlay networks}, 
  author = {Eugene Y. Vasserman and Rob Jansen and James Tyra and Nicholas J. Hopper and
        Yongdae Kim}, 
  booktitle = {Proceedings of the 2009 ACM Conference on Computer and Communications
        Security, CCS 2009, Chicago, Illinois, USA, November 9-13, 2009}, 
  organization = {ACM}, 
  year = {2009}, 
  pages = {390--399}, 
  editor = {Ehab Al-Shaer and Somesh Jha and Angelos D. Keromytis}, 
  publisher = {ACM}, 
  www_section = {membership concealment, P2P, privacy}, 
  isbn = {978-1-60558-894-0}, 
  doi = {10.1145/1653662.1653709}, 
  url = {http://portal.acm.org/citation.cfm?id=1653662.1653709}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/VassermanJTHK09.pdf}, 
}
DBLP:conf/ccs/YangG03
@conference{DBLP:conf/ccs/YangG03,
  title = {PPay: micropayments for peer-to-peer systems}, 
  author = {Beverly Yang and Hector Garcia-Molina}, 
  booktitle = {CCS'03. Proceedings of the 10th ACM Conference on Computer and
        Communications Security}, 
  organization = {ACM}, 
  year = {2003}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  pages = {300--310}, 
  publisher = {ACM}, 
  www_section = {economics, payment}, 
  isbn = {1-58113-738-9}, 
  doi = {10.1145/948109.948150}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2703\%20-\%20Yang\%20\%26\%20Garcia-Molina\%20-\%20PPay.pdf},
}
DBLP:conf/dbsec/Kerschbaum11
@conference{DBLP:conf/dbsec/Kerschbaum11,
  title = {Public-Key Encrypted Bloom Filters with Applications to Supply Chain Integrity}, 
  author = {Florian Kerschbaum}, 
  booktitle = {Public-Key Encrypted Bloom Filters with Applications to Supply Chain
        Integrity}, 
  year = {2011}, 
  pages = {60--75}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/diau/PfitzmannK00
@conference{DBLP:conf/diau/PfitzmannK00,
  title = {Anonymity, Unobservability, and Pseudonymity--A Proposal for Terminology}, 
  author = {Andreas Pfitzmann and Marit K{\"o}hntopp}, 
  booktitle = {Workshop on Design Issues in Anonymity and Unobservability}, 
  year = {2000}, 
  pages = {1--9}, 
  url = {https://bibliography.gnunet.org}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
DBLP:conf/esorics/DanezisDKT09
@conference{DBLP:conf/esorics/DanezisDKT09,
  title = {The Wisdom of Crowds: Attacks and Optimal Constructions}, 
  author = {George Danezis and Claudia Diaz and Emilia K{\"a}sper and Carmela Troncoso}, 
  booktitle = {Proceedings of the 14th European Symposium on Research in Computer Security
        (ESORICS 2009), Saint-Malo, France, September 21-23}, 
  organization = {Springer}, 
  volume = {5789}, 
  year = {2009}, 
  pages = {406--423}, 
  editor = {Michael Backes and Peng Ning}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We present a traffic analysis of the ADU anonymity scheme presented at
        ESORICS 2008, and the related RADU scheme. We show that optimal attacks are able
        to de-anonymize messages more effectively than believed before. Our analysis
        applies to single messages as well as long term observations using multiple
        messages. The search of a {\textquotedblleft}better{\textquotedblright} scheme is
        bound to fail, since we prove that the original Crowds anonymity system provides
        the best security for any given mean messaging latency. Finally we present
        D-Crowds, a scheme that supports any path length distribution, while leaking the
        least possible information, and quantify the optimal attacks against it}, 
  www_section = {anonymity, Crowds, traffic analysis}, 
  isbn = {978-3-642-04443-4}, 
  doi = {10.1007/978-3-642-04444-1}, 
  url = {http://www.springerlink.com/content/t6q86u137t4762k8/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisDKT09.pdf}, 
}
DBLP:conf/esorics/MalleshW07
@conference{DBLP:conf/esorics/MalleshW07,
  title = {Countering Statistical Disclosure with Receiver-Bound Cover Traffic}, 
  author = {Nayantara Mallesh and Matthew Wright}, 
  booktitle = {Proceedings of ESORICS 2007, 12th European Symposium On Research In Computer
        Security, Dresden, Germany, September 24-26, 2007, Proceedings}, 
  organization = {Springer}, 
  volume = {4734}, 
  year = {2007}, 
  pages = {547--562}, 
  editor = {Joachim Biskup and Javier Lopez}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Anonymous communications provides an important privacy service by keeping
        passive eavesdroppers from linking communicating parties. However, using
        long-term statistical analysis of traffic sent to and from such a system, it is
        possible to link senders with their receivers. Cover traffic is an effective, but
        somewhat limited, counter strategy against this attack. Earlier work in this area
        proposes that privacy-sensitive users generate and send cover traffic to the
        system. However, users are not online all the time and cannot be expected to send
        consistent levels of cover traffic, drastically reducing the impact of cover
        traffic. We propose that the mix generate cover traffic that mimics the sending
        patterns of users in the system. This receiver-bound cover helps to make up for
        users that aren't there, confusing the attacker. We show through simulation how
        this makes it difficult for an attacker to discern cover from real traffic and
        perform attacks based on statistical analysis. Our results show that
        receiver-bound cover substantially increases the time required for these attacks
        to succeed. When our approach is used in combination with user-generated cover
        traffic, the attack takes a very long time to succeed}, 
  www_section = {anonymity, cover traffic, privacy}, 
  isbn = {978-3-540-74834-2}, 
  doi = {10.1007/978-3-540-74835-9}, 
  url = {http://www.springerlink.com/content/k2146538700m71v7/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MalleshW07.pdf}, 
}
DBLP:conf/eurocrypt/ChaumP92
@conference{DBLP:conf/eurocrypt/ChaumP92,
  title = {Transferred Cash Grows in Size}, 
  author = {David Chaum and Torben P. Pedersen}, 
  booktitle = {EUROCRYPT'92 Workshop on the Theory and Application of Cryptographic
        Techniques}, 
  organization = {Springer}, 
  volume = {658}, 
  year = {1992}, 
  month = {May}, 
  address = {Balatonf{\"u}red, Hungary}, 
  pages = {390--407}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {All known methods for transferring electronic money have the disadvantages
        that the number of bits needed to represent the money after each payment
        increases, and that a payer can recognize his money if he sees it later in the
        chain of payments (forward traceability). This paper shows that it is impossible
        to construct an electronic money system providing transferability without the
        property that the money grows when transferred. Furthermore it is argued that an
        unlimited powerful user can always recognize his money later. Finally, the lower
        bounds on the size of transferred electronic money are discussed in terms of
        secret sharing schemes}, 
  www_section = {electronic money, forward traceability, secret sharing, transfer}, 
  isbn = {3-540-56413-6}, 
  doi = {10.1007/3-540-47555-9_32}, 
  url = {https://doi.org/10.1007/3-540-47555-9_32}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2792_-_Chaun_\%26_Pedersen_-_Transferred_cash_grows_in_size.pdf},
}
DBLP:conf/eurocrypt/RussellW02
@conference{DBLP:conf/eurocrypt/RussellW02,
  title = {How to Fool an Unbounded Adversary with a Short Key}, 
  author = {Alexander Russell and Hong Wang}, 
  booktitle = {How to Fool an Unbounded Adversary with a Short Key}, 
  year = {2002}, 
  pages = {133--148}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
DBLP:conf/focs/DworkNV12
@conference{DBLP:conf/focs/DworkNV12,
  title = {The Privacy of the Analyst and the Power of the State}, 
  author = {Cynthia Dwork and Moni Naor and Salil P. Vadhan}, 
  booktitle = {The Privacy of the Analyst and the Power of the State}, 
  year = {2012}, 
  pages = {400--409}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/icc/ChenCLNC08
@conference{DBLP:conf/icc/ChenCLNC08,
  title = {Experimental Analysis of Super-Seeding in BitTorrent}, 
  author = {Zhijia Chen and Yang Chen and Chuang Lin and Vaibhav Nivargi and Pei Cao}, 
  booktitle = {ICC'08--Proceedings of the 2008 IEEE International Conference on
        Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2008}, 
  month = {May}, 
  address = {Beijing, China}, 
  pages = {65--69}, 
  publisher = {IEEE Computer Society}, 
  abstract = {With the popularity of BitTorrent, improving its performance has been an
        active research area. Super-seeding, a special upload policy for initial seeds,
        improves the efficiency in producing multiple seeds and reduces the uploading
        cost of the initial seeders. However, the overall benefit of super seeding
        remains a question. In this paper, we conduct an experimental study over the
        performance of super-seeding scheme of BitTornado. We attempt to answer the
        following questions: whether and how much super-seeding saves uploading cost,
        whether the download time of all peers is decreased by super-seeding, and in
        which scenario super-seeding performs worse. With varying seed bandwidth and peer
        behavior, we analyze the overall download time and upload cost of super seeding
        scheme during random period tests over 250 widely distributed PlanetLab nodes.
        The results show that benefits of super-seeding depend highly on the upload
        bandwidth of the initial seeds and the behavior of individual peers. Our work not
        only provides reference for the potential adoption of super-seeding in
        BitTorrent, but also much insights for the balance of enhancing Quality of
        Experience (QoE) and saving cost for a large-scale BitTorrent-like P2P commercial
        application}, 
  www_section = {BitTorrent, super-seeding}, 
  isbn = {978-1-4244-2075-9}, 
  doi = {10.1109/ICC.2008.20}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICC\%2708\%20-\%20Super-Seeding\%20in\%20BitTorrent.PDF},
}
DBLP:conf/infocom/ChandraBB04
@conference{DBLP:conf/infocom/ChandraBB04,
  title = {MultiNet: Connecting to Multiple IEEE 802.11 Networks Using a Single Wireless
        Card}, 
  author = {Ranveer Chandra and Victor Bahl and Pradeep Bahl}, 
  booktitle = {INFOCOM}, 
  year = {2004}, 
  abstract = {There are a number of scenarios where it is desirable to have a wireless
        device connect to multiple networks simultaneously. Currently, this is possible
        only by using multiple wireless network cards in the device. Unfortunately, using
        multiple wireless cards causes excessive energy drain and consequent reduction of
        lifetime in battery operated devices. In this paper, we propose a software based
        approach, called MultiNet, that facilitates simultaneous connections to multiple
        networks by virtualizing a single wireless card. The wireless card is virtualized
        by introducing an intermediate layer below IP, which continuously switches the
        card across multiple networks. The goal of the switching algorithm is to be
        transparent to the user who sees her machine as being connected to multiple
        networks. We present the design, implementation, and performance of the MultiNet
        system.We analyze and evaluate buffering and switching algorithms in terms of
        delay and energy consumption. Our system has been operational for over twelve
        months, it is agnostic of the upper layer protocols, and works well over popular
        IEEE 802.11 wireless LAN cards}, 
  url = {http://www.pubzone.org/dblp/conf/infocom/ChandraBB04}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/18_3.PDF}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
DBLP:conf/infocom/LandaGCMR09
@conference{DBLP:conf/infocom/LandaGCMR09,
  title = {A Sybilproof Indirect Reciprocity Mechanism for Peer-to-Peer Networks}, 
  author = {Raul Leonardo Landa Gamiochipi and David Griffin and Richard G. Clegg and Eleni
        Mykoniati and Miguel Rio}, 
  booktitle = {INFOCOM 2009. The 28th IEEE International Conference on Computer
        Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  month = {April}, 
  address = {Rio de Janeiro, Brazil}, 
  pages = {343--351}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Although direct reciprocity (Tit-for-Tat) contribution systems have been
        successful in reducing free-loading in peer-to-peer overlays, it has been shown
        that, unless the contribution network is dense, they tend to be slow (or may even
        fail) to converge [1]. On the other hand, current indirect reciprocity mechanisms
        based on reputation systems tend to be susceptible to sybil attacks, peer slander
        and whitewashing.In this paper we present PledgeRoute, an accounting mechanism
        for peer contributions that is based on social capital. This mechanism allows
        peers to contribute resources to one set of peers and use this contribution to
        obtain services from a different set of peers, at a different time. PledgeRoute
        is completely decentralised, can be implemented in both structured and
        unstructured peer-to-peer systems, and it is resistant to the three kinds of
        attacks mentioned above.To achieve this, we model contribution transitivity as a
        routing problem in the contribution network of the peer-to-peer overlay, and we
        present arguments for the routing behaviour and the sybilproofness of our
        contribution transfer procedures on this basis. Additionally, we present
        mechanisms for the seeding of the contribution network, and a combination of
        incentive mechanisms and reciprocation policies that motivate peers to adhere to
        the protocol and maximise their service contributions to the overlay}, 
  www_section = {p2p network, reciprocity mechanism, sybilproof}, 
  isbn = {978-1-4244-3512-8}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Sybilproof\%20Indirect\%20Reprocity\%20Mechanism\%20for\%20P2P\%20Networks\%20.pdf},
}
DBLP:conf/infocom/ZhangCY07
@conference{DBLP:conf/infocom/ZhangCY07,
  title = {MARCH: A Distributed Incentive Scheme for Peer-to-Peer Networks}, 
  author = {Zhan Zhang and Shigang Chen and MyungKeun Yoon}, 
  booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer
        Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {May}, 
  address = {Anchorage, Alaska, USA}, 
  pages = {1091--1099}, 
  publisher = {IEEE Computer Society}, 
  abstract = {As peer-to-peer networks grow larger and include more diverse users, the lack
        of incentive to encourage cooperative behavior becomes one of the key problems.
        This challenge cannot be fully met by traditional incentive schemes, which suffer
        from various attacks based on false reports. Especially, due to the lack of
        central authorities in typical P2P systems, it is difficult to detect colluding
        groups. Members in the same colluding group can cooperate to manipulate their
        history information, and the damaging power increases dramatically with the group
        size. In this paper, we propose a new distributed incentive scheme, in which the
        benefit that a node can obtain from the system is proportional to its
        contribution to the system, and a colluding group cannot gain advantage by
        cooperation regardless of its size. Consequently, the damaging power of colluding
        groups is strictly limited. The proposed scheme includes three major components:
        a distributed authority infrastructure, a key sharing protocol, and a contract
        verification protocol}, 
  www_section = {march}, 
  isbn = {1-4244-1047-9}, 
  doi = {10.1109/INFCOM.2007.131}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20MARCH.pdf},
}
DBLP:conf/iptps/DabekZDKS03
@conference{DBLP:conf/iptps/DabekZDKS03,
  title = {Towards a Common API for Structured Peer-to-Peer Overlays}, 
  author = {Dabek, Frank and Ben Y. Zhao and Peter Druschel and John Kubiatowicz and Ion
        Stoica}, 
  booktitle = {IPTPS'03. Proceedings of the Second International Workshop on Peer-to-Peer
        Systems}, 
  organization = {Springer}, 
  volume = {2735}, 
  year = {2003}, 
  month = {February}, 
  address = {Berkeley, CA, USA}, 
  pages = {33--44}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {In this paper, we describe an ongoing effort to define common APIs for
        structured peer-to-peer overlays and the key abstractions that can be built on
        them. In doing so, we hope to facilitate independent innovation in overlay
        protocols, services, and applications, to allow direct experimental comparisons,
        and to encourage application development by third parties. We provide a snapshot
        of our efforts and discuss open problems in an effort to solicit feedback from
        the research community}, 
  www_section = {API, key abstraction}, 
  isbn = {3-540-40724-3}, 
  doi = {10.1007/978-3-540-45172-3_3}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2703\%20-\%20Towards\%20a\%20common\%20API.pdf},
}
DBLP:conf/ladc/CourtesKP07
@conference{DBLP:conf/ladc/CourtesKP07,
  title = {Security Rationale for a Cooperative Backup Service for Mobile Devices}, 
  author = {Ludovic Court{\`e}s and Killijian, Marc-Olivier and Powell, David}, 
  booktitle = {LADC}, 
  year = {2007}, 
  pages = {212--230}, 
  abstract = {Mobile devices (e.g., laptops, PDAs, cell phones) are increasingly relied on
        but are used in contexts that put them at risk of physical damage, loss or theft.
        This paper discusses security considerations that arise in the design of a
        cooperative backup service for mobile devices. Participating devices leverage
        encounters with other devices to temporarily replicate critical data. Anyone is
        free to participate in the cooperative service, without requiring any prior trust
        relationship with other participants. In this paper, we identify security threats
        relevant in this context as well as possible solutions and discuss how they map
        to low-level security requirements related to identity and trust establishment.
        We propose self-organized, policy-neutral mechanisms that allow the secure
        designation and identification of participating devices. We show that they can
        serve as a building block for a wide range of cooperation policies that address
        most of the security threats we are concerned with. We conclude on future
        directions}, 
  www_section = {backup, reputation, self-organization}, 
  doi = {10.1007/978-3-540-75294-3}, 
  url = {http://www.springerlink.com/content/p210q274g22j8g77/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.106.5673.pdf}, 
}
DBLP:conf/middleware/BertierFGKL10
@conference{DBLP:conf/middleware/BertierFGKL10,
  title = {The Gossple Anonymous Social Network}, 
  author = {Marin Bertier and Davide Frey and Rachid Guerraoui and Anne-Marie Kermarrec and
        Vincent Leroy}, 
  booktitle = {Proceedings of the ACM/IFIP/USENIX 11th International Conference on
        Middleware}, 
  organization = {ACM/IFIP/USENIX}, 
  year = {2010}, 
  pages = {191--211}, 
  publisher = {ACM/IFIP/USENIX}, 
  abstract = {While social networks provide news from old buddies, you can learn a lot more
        from people you do not know, but with whom you share many interests. We show in
        this paper how to build a network of anonymous social acquaintances using a
        gossip protocol we call Gossple, and how to leverage such a network to enhance
        navigation within Web 2.0 collaborative applications, {\`a} la LastFM and
        Delicious. Gossple nodes (users) periodically gossip digests of their interest
        profiles and compute their distances (in terms of interest) with respect to other
        nodes. This is achieved with little bandwidth and storage, fast convergence, and
        without revealing which profile is associated with which user. We evaluate
        Gossple on real traces from various Web 2.0 applications with hundreds of
        PlanetLab hosts and thousands of simulated nodes}, 
  www_section = {gossple, social networks}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gossple2010Bertier.pdf},
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/netys/BoutetFJKR13
@conference{DBLP:conf/netys/BoutetFJKR13,
  title = {FreeRec: An Anonymous and Distributed Personalization Architecture}, 
  author = {Antoine Boutet and Davide Frey and Arnaud Jegou and Anne-Marie Kermarrec and
        Heverson B. Ribeiro}, 
  booktitle = {FreeRec: An Anonymous and Distributed Personalization Architecture}, 
  year = {2013}, 
  pages = {58--73}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/p2p/AmannEHF08
@conference{DBLP:conf/p2p/AmannEHF08,
  title = {IgorFs: A Distributed P2P File System}, 
  author = {Bernhard Amann and Benedikt Elser and Yaser Houri and Thomas Fuhrmann}, 
  booktitle = {Peer-to-Peer Computing}, 
  year = {2008}, 
  pages = {77--78}, 
  abstract = {IgorFs is a distributed, decentralized peer-to-peer (P2P) file system that is
        completely transparent to the user. It is built on top of the Igor peer-to-peer
        overlay network, which is similar to Chord, but provides additional features like
        service orientation or proximity neighbor and route selection. IgorFs offers an
        efficient means to publish data files that are subject to frequent but minor
        modifications. In our demonstration we show two use cases for IgorFs: the first
        example is (static) software-distribution and the second example is (dynamic)
        file distribution}, 
  www_section = {distributed storage, P2P}, 
  doi = {10.1109/P2P.2008.19}, 
  url = {http://www.pubzone.org/dblp/conf/p2p/AmannEHF08}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/amann08igorfs.pdf}, 
}
DBLP:conf/p2p/EberspacherS05a
@incollection{DBLP:conf/p2p/EberspacherS05a,
  title = {First and Second Generation of Peer-to-Peer Systems}, 
  author = {J{\"o}rg Ebersp{\"a}cher and R{\"u}diger Schollmeier}, 
  booktitle = {Peer-to-Peer Systems and Applications}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3485}, 
  year = {2005}, 
  pages = {35--56}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Peer-to-Peer (P2P) networks appeared roughly around the year 2000 when a
        broadband Internet infrastructure (even at the network edge) became widely
        available. Other than traditional networks Peer-to-Peer networks do not rely on a
        specific infrastructure offering transport services. Instead they form
        {\textquotedblleft}overlay structures{\textquotedblright} focusing on content
        allocation and distribution based on TCP or HTTP connections. Whereas in a
        standard Client-Server configuration content is stored and provided only via some
        central server(s), Peer-to-Peer networks are highly decentralized and locate a
        desired content at some participating peer and provide the corresponding IP
        address of that peer to the searching peer. The download of that content is then
        initiated using a separate connection, often using HTTP. Thus, the high load
        usually resulting for a central server and its surrounding network is avoided
        leading to a more even distribution of load on the underlying physical network.
        On the other hand, such networks are typically subject to frequent changes
        because peers join and leave the network without any central control}, 
  www_section = {generation, P2P, peer-to-peer networking}, 
}
DBLP:conf/saint/SaitoMSSM07
@conference{DBLP:conf/saint/SaitoMSSM07,
  title = {Local Production, Local Consumption: Peer-to-Peer Architecture for a Dependable
        and Sustainable Social Infrastructure}, 
  author = {Saito, Kenji and Morino, Eiichi and Yoshihiko Suko and Takaaki Suzuki and
        Murai, Jun}, 
  booktitle = {SAINT'07. Proceedings of the 2007 Symposium on Applications and the
        Internet}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {January}, 
  address = {Hiroshima, Japan}, 
  pages = {0--58}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Peer-to-peer (P2P) is a system of overlay networks such that participants can
        potentially take symmetrical roles. This translates itself into a design based on
        the philosophy of Local Production, Local Consumption (LPLC), originally an
        agricultural concept to promote sustainable local economy. This philosophy helps
        enhancing survivability of a society by providing a dependable economic
        infrastructure and promoting the power of individuals. This paper attempts to put
        existing works of P2P designs into the perspective of the five-layer architecture
        model to realize LPLC, and proposes future research directions toward integration
        of P2P studies for actualization of a dependable and sustainable social
        infrastructure}, 
  www_section = {LPLC, P2P, peer-to-peer networking}, 
  doi = {10.1109/SAINT-W.2007.59}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SAINT\%2707\%20-\%20Local\%20production\%2C\%20local\%20consumption\%20p2p\%20architecture.pdf},
}
DBLP:conf/sigcomm/JainDPF05
@conference{DBLP:conf/sigcomm/JainDPF05,
  title = {Using redundancy to cope with failures in a delay tolerant network}, 
  author = {Sushant Jain and Michael J. Demmer and Rabin K. Patra and Fall, Kevin}, 
  booktitle = {SIGCOMM}, 
  organization = {ACM New York, NY, USA}, 
  year = {2005}, 
  address = {Philadelphia, Pennsylvania, USA}, 
  pages = {109--120}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {We consider the problem of routing in a delay tolerant network (DTN) in the
        presence of path failures. Previous work on DTN routing has focused on using
        precisely known network dynamics, which does not account for message losses due
        to link failures, buffer overruns, path selection errors, unscheduled delays, or
        other problems. We show how to split, replicate, and erasure code message
        fragments over multiple delivery paths to optimize the probability of successful
        message delivery. We provide a formulation of this problem and solve it for two
        cases: a 0/1 (Bernoulli) path delivery model where messages are either fully lost
        or delivered, and a Gaussian path delivery model where only a fraction of a
        message may be delivered. Ideas from the modern portfolio theory literature are
        borrowed to solve the underlying optimization problem. Our approach is directly
        relevant to solving similar problems that arise in replica placement in
        distributed file systems and virtual node placement in DHTs. In three different
        simulated DTN scenarios covering a wide range of applications, we show the
        effectiveness of our approach in handling failures}, 
  www_section = {delay tolerant network, routing}, 
  isbn = {1-59593-009-4}, 
  doi = {10.1145/1080091.1080106}, 
  url = {http://portal.acm.org/citation.cfm?doid=1080091.1080106}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper-JaiDem.pdf}, 
}
DBLP:conf/sigecom/GhoshR11
@conference{DBLP:conf/sigecom/GhoshR11,
  title = {Selling Privacy at Auction}, 
  author = {Arpita Ghosh and Aaron Roth}, 
  booktitle = {EC'11--Proceedings of the 12th ACM Conference on Electronic Commerce}, 
  year = {2011}, 
  pages = {199--208}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
DBLP:conf/sp/DanezisG09
@conference{DBLP:conf/sp/DanezisG09,
  title = {Sphinx: A Compact and Provably Secure Mix Format}, 
  author = {George Danezis and Ian Goldberg}, 
  booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P
        2009), 17-20 May, Oakland, California, USA}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  pages = {269--282}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Sphinx is a cryptographic message format used to relay anonymized messages
        within a mix network. It is more compact than any comparable scheme, and supports
        a full set of security features: indistinguishable replies, hiding the path
        length and relay position, as well as providing unlinkability for each leg of the
        message's journey over the network. We prove the full cryptographic security of
        Sphinx in the random oracle model, and we describe how it can be used as an
        efficient drop-in replacement in deployed remailer systems}, 
  www_section = {anonymity, cryptography}, 
  isbn = {978-0-7695-3633-0}, 
  doi = {10.1109/SP.2009.15}, 
  url = {http://portal.acm.org/citation.cfm?id=1607723.1608138}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DanezisG09.pdf}, 
}
DBLP:conf/sp/NarayananS09
@conference{DBLP:conf/sp/NarayananS09,
  title = {De-anonymizing Social Networks}, 
  author = {Arvind Narayanan and Vitaly Shmatikov}, 
  booktitle = {Proceedings of the 30th IEEE Symposium on Security and Privacy (S\&P
        2009), 17-20 May, Oakland, California, USA}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  pages = {173--187}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Operators of online social networks are increasingly sharing potentially
        sensitive information about users and their relationships with advertisers,
        application developers, and data-mining researchers. Privacy is typically
        protected by anonymization, i.e., removing names, addresses, etc. We present a
        framework for analyzing privacy and anonymity in social networks and develop a
        new re-identification algorithm targeting anonymized social-network graphs. To
        demonstrate its effectiveness on real-world networks, we show that a third of the
        users who can be verified to have accounts on both Twitter, a popular
        microblogging service, and Flickr, an online photo-sharing site, can be
        re-identified in the anonymous Twitter graph with only a 12\% error rate. Our
        de-anonymization algorithm is based purely on the network topology, does not
        require creation of a large number of dummy "sybil" nodes, is robust to noise and
        all existing defenses, and works even when the overlap between the target network
        and the adversary's auxiliary information is small}, 
  www_section = {anonymity, network topology, privacy}, 
  isbn = {978-0-7695-3633-0}, 
  url = {http://randomwalker.info/social-networks/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NarayananS09.pdf}, 
}
DBLP:conf/sss/Kermarrec09
@conference{DBLP:conf/sss/Kermarrec09,
  title = {Challenges in Personalizing and Decentralizing the Web: An Overview of GOSSPLE}, 
  author = {Anne-Marie Kermarrec}, 
  booktitle = {SSS'09--Proceedings of the 11th International Symposium on Stabilization,
        Safety, and Security of Distributed Systems}, 
  year = {2009}, 
  pages = {1--16}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
DBLP:conf/stoc/Ullman13
@conference{DBLP:conf/stoc/Ullman13,
  title = {Answering $n^{2+o(1)}$ Counting Queries with Differential Privacy is Hard}, 
  author = {Jonathan Ullman}, 
  booktitle = {STOC'13--Proceedings of the 45th Annual ACM Symposium on Theory of Computing}, 
  year = {2013}, 
  pages = {361--370}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/tridentcom/AlbrechtH10
@conference{DBLP:conf/tridentcom/AlbrechtH10,
  title = {Managing Distributed Applications Using Gush}, 
  author = {Jeannie R. Albrecht and Danny Yuxing Huang}, 
  booktitle = {TRIDENTCOM}, 
  year = {2010}, 
  pages = {401--411}, 
  www_section = {distributed applications, emulation, GENI, PlanetLab, testbed}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gush.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/tridentcom/HermenierR12
@conference{DBLP:conf/tridentcom/HermenierR12,
  title = {How to Build a Better Testbed: Lessons from a Decade of Network Experiments on
        Emulab}, 
  author = {Fabien Hermenier and Robert Ricci}, 
  booktitle = {TRIDENTCOM}, 
  year = {2012}, 
  pages = {287--304}, 
  www_section = {emulab, emulation, testbed}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/how-to-build-a-better-testbed.pdf},
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/tridentcom/NguyenRKFMB10
@conference{DBLP:conf/tridentcom/NguyenRKFMB10,
  title = {How to Build Complex, Large-Scale Emulated Networks}, 
  author = {Hung X. Nguyen and Roughan, Matthew and Knight, Simon and Nick Falkner and
        Maennel, Olaf and Randy Bush}, 
  booktitle = {TRIDENTCOM}, 
  year = {2010}, 
  pages = {3--18}, 
  www_section = {autonetkit, emulation, netkit, network, testbed, virtualization}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AutoNetkit_0.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/tridentcom/PeralaPML10
@conference{DBLP:conf/tridentcom/PeralaPML10,
  title = {A Novel Testbed for P2P Networks}, 
  author = {Pekka H. J. Per{\"a}l{\"a} and Jori P. Paananen and Milton Mukhopadhyay and
        Jukka-Pekka Laulajainen}, 
  booktitle = {TRIDENTCOM}, 
  year = {2010}, 
  pages = {69--83}, 
  www_section = {emulation, P2P, testbed}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/A_Novel_Testbed_for_P2P_Networks.pdf},
  url = {https://bibliography.gnunet.org}, 
}
DBLP:conf/usenix/HiblerRSDGSWL08
@conference{DBLP:conf/usenix/HiblerRSDGSWL08,
  title = {Large-scale Virtualization in the Emulab Network Testbed}, 
  author = {Mike Hibler and Robert Ricci and Leigh Stoller and Jonathon Duerig and Shashi
        Guruprasad and Tim Stack and Kirk Webb and Jay Lepreau}, 
  booktitle = {USENIX Annual Technical Conference}, 
  year = {2008}, 
  pages = {113--128}, 
  www_section = {emulab, emulation, testbed, virtualization}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/emulab.pdf}, 
}
DBLP:journals/corr/abs-1202-4503
@article{DBLP:journals/corr/abs-1202-4503,
  title = {A Critical Look at Decentralized Personal Data Architectures}, 
  author = {Arvind Narayanan and Vincent Toubiana and Solon Barocas and Helen Nissenbaum
        and Dan Boneh}, 
  journal = {CoRR}, 
  volume = {abs/1202.4503}, 
  year = {2012}, 
  month = {February}, 
  abstract = {While the Internet was conceived as a decentralized network, the most widely
        used web applications today tend toward centralization. Control increasingly
        rests with centralized service providers who, as a consequence, have also amassed
        unprecedented amounts of data about the behaviors and personalities of
        individuals. Developers, regulators, and consumer advocates have looked to
        alternative decentralized architectures as the natural response to threats posed
        by these centralized services. The result has been a great variety of solutions
        that include personal data stores (PDS), infomediaries, Vendor Relationship
        Management (VRM) systems, and federated and distributed social networks. And yet,
        for all these efforts, decentralized personal data architectures have seen little
        adoption. This position paper attempts to account for these failures, challenging
        the accepted wisdom in the web community on the feasibility and desirability of
        these approaches. We start with a historical discussion of the development of
        various categories of decentralized personal data architectures. Then we survey
        the main ideas to illustrate the common themes among these efforts. We tease
        apart the design characteristics of these systems from the social values that
        they (are intended to) promote. We use this understanding to point out numerous
        drawbacks of the decentralization paradigm, some inherent and others incidental.
        We end with recommendations for designers of these systems for working towards
        goals that are achievable, but perhaps more limited in scope and ambition}, 
  www_section = {distributed social networks, economics, personal data stores, policy,
        privacy, web}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoRR\%20-\%20Critical\%20look\%20at\%20decentralization.pdf},
}
DBLP:journals/corr/abs-cs-0611016
@article{DBLP:journals/corr/abs-cs-0611016,
  title = {Increasing Data Resilience of Mobile Devices with a Collaborative Backup
        Service}, 
  author = {Damien Martin-Guillerez and Michel Ban{\^a}tre and Paul Couderc}, 
  journal = {CoRR}, 
  volume = {abs/cs/0611016}, 
  year = {2006}, 
  abstract = {Whoever has had his cell phone stolen knows how frustrating it is to be
        unable to get his contact list back. To avoid data loss when losing or destroying
        a mobile device like a PDA or a cell phone, data is usually backed-up to a fixed
        station. However, in the time between the last backup and the failure, important
        data can have been produced and then lost. To handle this issue, we propose a
        transparent collaborative backup system. Indeed, by saving data on other mobile
        devices between two connections to a global infrastructure, we can resist to such
        scenarios. In this paper, after a general description of such a system, we
        present a way to replicate data on mobile devices to attain a prerequired
        resilience for the backup}, 
  url = {http://www.pubzone.org/dblp/journals/corr/abs-cs-0611016}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0611016v1.pdf}, 
  www_section = {Unsorted}, 
}
DBLP:journals/ijufks/Sweene02
@article{DBLP:journals/ijufks/Sweene02,
  title = {k-Anonymity: A Model for Protecting Privacy}, 
  author = {Latanya Sweeney}, 
  journal = {International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems}, 
  volume = {10}, 
  number = {5}, 
  year = {2002}, 
  pages = {557--570}, 
  www_section = {privacy}, 
}
DBLP:journals/pvldb/Amer-YahiaBLS08
@article{DBLP:journals/pvldb/Amer-YahiaBLS08,
  title = {Efficient network aware search in collaborative tagging sites}, 
  author = {Sihem Amer-Yahia and Michael Benedikt and Laks V. S. Lakshmanan and Julia
        Stoyanovich}, 
  journal = {PVLDB'08}, 
  volume = {1}, 
  number = {1}, 
  year = {2008}, 
  month = {August}, 
  address = {Auckland, New Zealand}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
DBLP:journals/tdp/NojimaK09
@article{DBLP:journals/tdp/NojimaK09,
  title = {Cryptographically secure Bloom-filters}, 
  author = {Ryo Nojima and Youki Kadobayashi}, 
  journal = {Transactions on Data Privacy}, 
  volume = {2}, 
  number = {2}, 
  year = {2009}, 
  pages = {131--139}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
DD08Survey
@techreport{DD08Survey,
  title = {A Survey of Anonymous Communication Channels}, 
  author = {George Danezis and Claudia Diaz}, 
  number = {MSR-TR-2008-35}, 
  year = {2008}, 
  month = {January}, 
  institution = {Microsoft Research}, 
  abstract = {We present an overview of the field of anonymous communications, from its
        establishment in 1981 from David Chaum to today. Key systems are presented
        categorized according to their underlying principles: semi-trusted relays, mix
        systems, remailers, onion routing, and systems to provide robust mixing. We
        include extended discussions of the threat models and usage models that different
        schemes provide, and the trade-offs between the security properties offered and
        the communication characteristics different systems support}, 
  www_section = {onion routing, robustness}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.138.7951}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DD08Survey.pdf}, 
}
DK11
@conference{DK11,
  title = {Practical Privacy-Preserving Multiparty Linear Programming Based on Problem
        Transformation}, 
  author = {Dreier, Jannik and Florian Kerschbaum}, 
  booktitle = {PASSAT'11--Proceedings of the Third IEEE International Conference on
        Information Privacy, Security, Risk and Trust}, 
  organization = {IEEE Computer Society}, 
  year = {2011}, 
  month = {October}, 
  address = {Boston, Massachusetts, USA}, 
  pages = {916--924}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Cryptographic solutions to privacy-preserving multiparty linear programming
        are slow. This makes them unsuitable for many economically important
        applications, such as supply chain optimization, whose size exceeds their
        practically feasible input range. In this paper we present a privacy-preserving
        transformation that allows secure outsourcing of the linear program computation
        in an efficient manner. We evaluate security by quantifying the leakage about the
        input after the transformation and present implementation results. Using this
        transformation, we can mostly replace the costly cryptographic operations and
        securely solve problems several orders of magnitude larger}, 
  www_section = {cryptography, SMC}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PASSAT\%2711\%20-\%20Multiparty\%20linear\%20programming.pdf},
  url = {https://bibliography.gnunet.org}, 
}
Dabek:2004:DDL:1251175.1251182
@conference{Dabek:2004:DDL:1251175.1251182,
  title = {Designing a DHT for Low Latency and High Throughput}, 
  author = {Dabek, Frank and Li, Jinyang and Emil Sit and Robertson, James and Frans M.
        Kaashoek and Robert Morris}, 
  booktitle = {NSDI'04--Proceedings of the 1st conference on Symposium on Networked Systems
        Design and Implementation}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  month = {March}, 
  address = {San Francisco, CA, USA}, 
  pages = {7--7}, 
  publisher = {USENIX Association}, 
  abstract = {Designing a wide-area distributed hash table (DHT) that provides
        high-throughput and low-latency network storage is a challenge. Existing systems
        have explored a range of solutions, including iterative routing, recursive
        routing, proximity routing and neighbor selection, erasure coding, replication,
        and server selection. This paper explores the design of these techniques and
        their interaction in a complete system, drawing on the measured performance of a
        new DHT implementation and results from a simulator with an accurate Internet
        latency model. New techniques that resulted from this exploration include use of
        latency predictions based on synthetic co-ordinates, efficient integration of
        lookup routing and data fetching, and a congestion control mechanism suitable for
        fetching data striped over large numbers of servers. Measurements with 425 server
        instances running on 150 PlanetLab and RON hosts show that the latency
        optimizations reduce the time required to locate and fetch data by a factor of
        two. The throughput optimizations result in a sustainable bulk read throughput
        related to the number of DHT hosts times the capacity of the slowest access link;
        with 150 selected PlanetLab hosts, the peak aggregate throughput over multiple
        clients is 12.8 megabytes per second}, 
  www_section = {distributed hash table, high-throughput, latency}, 
  url = {http://dl.acm.org/citation.cfm?id=1251175.1251182}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2704\%20-\%20Designing\%20a\%20DHT\%20for\%20low\%20latency\%20and\%20high\%20throughput.pdf},
}
Dabek:2004:VDN:1030194.1015471
@article{Dabek:2004:VDN:1030194.1015471,
  title = {Vivaldi: a decentralized network coordinate system}, 
  author = {Dabek, Frank and Russ Cox and Frans M. Kaashoek and Robert Morris}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {34}, 
  year = {2004}, 
  month = {October}, 
  address = {New York, NY, USA}, 
  pages = {15--26}, 
  publisher = {ACM}, 
  abstract = {Large-scale Internet applications can benefit from an ability to predict
        round-trip times to other hosts without having to contact them first. Explicit
        measurements are often unattractive because the cost of measurement can outweigh
        the benefits of exploiting proximity information. Vivaldi is a simple,
        light-weight algorithm that assigns synthetic coordinates to hosts such that the
        distance between the coordinates of two hosts accurately predicts the
        communication latency between the hosts. Vivaldi is fully distributed, requiring
        no fixed network infrastructure and no distinguished hosts. It is also efficient:
        a new host can compute good coordinates for itself after collecting latency
        information from only a few other hosts. Because it requires little
        com-munication, Vivaldi can piggy-back on the communication patterns of the
        application using it and scale to a large number of hosts. An evaluation of
        Vivaldi using a simulated network whose latencies are based on measurements among
        1740 Internet hosts shows that a 2-dimensional Euclidean model with height
        vectors embeds these hosts with low error (the median relative error in
        round-trip time prediction is 11 percent)}, 
  www_section = {internet topology, network coordinates, Vivaldi}, 
  issn = {0146-4833}, 
  doi = {10.1145/1030194.1015471}, 
  url = {http://doi.acm.org/10.1145/1030194.1015471}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Compt.\%20Commun.\%20Rev.\%20-\%20Vivaldi.pdf},
}
Damiani02areputation-based
@conference{Damiani02areputation-based,
  title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer
        Networks}, 
  author = {Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and
        Pierangela Samarati and Fabio Violante}, 
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications
        Security}, 
  organization = {ACM Press}, 
  year = {2002}, 
  pages = {207--216}, 
  publisher = {ACM Press}, 
  abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently
        introduced P2P services have reached tens of millions of users. A feature that
        significantly contributes to the success of many P2P applications is user
        anonymity. However, anonymity opens the door to possible misuses and abuses,
        exploiting the P2P network as a way to spread tampered with resources, including
        Trojan Horses, viruses, and spam. To address this problem we propose a
        self-regulating system where the P2P network is used to implement a robust
        reputation mechanism. Reputation sharing is realized through a distributed
        polling algorithm by which resource requestors can assess the reliability of a
        resource offered by a participant before initiating the download. This way,
        spreading of malicious contents will be reduced and eventually blocked. Our
        approach can be straightforwardly piggybacked on existing P2P protocols and
        requires modest modifications to current implementations}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.1784\&rep=rep1\&type=pdf},
  www_section = {P2P}, 
}
Damiani02areputation-based_0
@conference{Damiani02areputation-based_0,
  title = {A Reputation-Based Approach for Choosing Reliable Resources in Peer-to-Peer
        Networks}, 
  author = {Ernesto Damiani and Sabrina De Capitani di Vimercati and Stefano Paraboschi and
        Pierangela Samarati and Fabio Violante}, 
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications
        Security}, 
  organization = {ACM Press}, 
  year = {2002}, 
  pages = {207--216}, 
  publisher = {ACM Press}, 
  abstract = {Peer-to-peer (P2P) applications have seen an enormous success, and recently
        introduced P2P services have reached tens of millions of users. A feature that
        significantly contributes to the success of many P2P applications is user
        anonymity. However, anonymity opens the door to possible misuses and abuses,
        exploiting the P2P network as a way to spread tampered with resources, including
        Trojan Horses, viruses, and spam. To address this problem we propose a
        self-regulating system where the P2P network is used to implement a robust
        reputation mechanism. Reputation sharing is realized through a distributed
        polling algorithm by which resource requestors can assess the reliability of a
        resource offered by a participant before initiating the download. This way,
        spreading of malicious contents will be reduced and eventually blocked. Our
        approach can be straightforwardly piggybacked on existing P2P protocols and
        requires modest modifications to current implementations}, 
  url = {http://seclab.dti.unimi.it/Papers/ccs02.ps}, 
  www_section = {P2P}, 
}
Dan:SFMix03
@conference{Dan:SFMix03,
  title = {Forward Secure Mixes}, 
  author = {George Danezis}, 
  booktitle = {Proceedings of 7th Nordic Workshop on Secure IT Systems}, 
  year = {2002}, 
  month = {November}, 
  address = {Karlstad, Sweden}, 
  pages = {195--207}, 
  editor = {Fischer-H{\"u}bner, Simone and Jonsson, Erland}, 
  abstract = {New threats such as compulsion to reveal logs, secret and private keys as
        well as to decrypt material are studied in the context of the security of mix
        networks. After a comparison of this new threat model with the traditional one, a
        new construction is introduced, the fs-mix, that minimizes the impact that such
        powers have on the security of the network, by using forward secure communication
        channels and key updating operation inside the mixes. A discussion about the
        forward security of these new proposals and some extensions is included}, 
  www_section = {anonymity, forward security, mix, traffic analysis}, 
  url = {http://citeseer.ist.psu.edu/533725.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dan-SFMix03.pdf}, 
}
DanSer04
@conference{DanSer04,
  title = {Statistical Disclosure or Intersection Attacks on Anonymity Systems}, 
  author = {George Danezis and Andrei Serjantov}, 
  booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2004}, 
  month = {May}, 
  address = {Toronto}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {LNCS}, 
  abstract = {In this paper we look at the information an attacker can extract using a
        statistical disclosure attack. We provide analytical results about the anonymity
        of users when they repeatedly send messages through a threshold mix following the
        model of Kesdogan, Agrawal and Penz [7] and through a pool mix. We then present a
        statistical disclosure attack that can be used to attack models of anonymous
        communication networks based on pool mixes. Careful approximations make the
        attack computationally efficient. Such models are potentially better suited to
        derive results that could apply to the security of real anonymous communication
        networks}, 
  www_section = {anonymity, statistical analysis}, 
  isbn = {978-3-540-24207-9}, 
  doi = {10.1007/b104759}, 
  url = {http://www.springerlink.com/content/tqljb3hybk4rubla/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.6.2954.pdf}, 
}
Danezis03mixminion:design
@conference{Danezis03mixminion:design,
  title = {Mixminion: Design of a Type III Anonymous Remailer Protocol}, 
  author = {George Danezis and Roger Dingledine and Nick Mathewson}, 
  booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy}, 
  year = {2003}, 
  pages = {2--15}, 
  abstract = {We present Mixminion, a message-based anonymous remailer protocol with secure
        single-use reply blocks. Mix nodes cannot distinguish Mixminion forward messages
        from reply messages, so forward and reply messages share the same anonymity set.
        We add directory servers that allow users to learn public keys and performance
        statistics of participating remailers, and we describe nymservers that provide
        long-term pseudonyms using single-use reply blocks as a primitive. Our design
        integrates link encryption between remailers to provide forward anonymity.
        Mixminion works in a real-world Internet environment, requires little
        synchronization or coordination between nodes, and protects against known
        anonymity-breaking attacks as well as or better than other systems with similar
        design parameters. 1. Overview Chaum first introduced anonymous remailers over 20
        years ago [7]}, 
  url = {http://mixminion.net/minion-design.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/minion-design.pdf}, 
  www_section = {anonymity}, 
}
Danezis04theeconomics
@conference{Danezis04theeconomics,
  title = {The Economics of Censorship Resistance}, 
  author = {George Danezis and Ross Anderson}, 
  booktitle = {Proceedings of the Third Annual Workshop on Economics and Information Security (WEIS 2004)}, 
  year = {2004}, 
  abstract = {We propose the first economic model of censorship resistance. Early
        peer-to-peer systems, such as the Eternity Service, sought to achieve censorship
        resistance by distributing content randomly over the whole Internet. An
        alternative approach is to encourage nodes to serve resources they are interested
        in. Both architectures have been implemented but so far there has been no
        quantitative analysis of the protection they provide. We develop a model inspired
        by economics and conflict theory to analyse these systems. Under our assumptions,
        resource distribution according to nodes' individual preferences provides better
        stability and resistance to censorship. Our results may have wider application
        too}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.7003\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.7003\%20\%281\%29.pdf},
  www_section = {economics}, 
}
Danezis05sybil-resistantdht
@conference{Danezis05sybil-resistantdht,
  title = {Sybil-resistant DHT routing}, 
  author = {George Danezis and Chris Lesniewski-laas and Frans M. Kaashoek and Ross
        Anderson}, 
  booktitle = {ESORICS}, 
  organization = {Springer}, 
  year = {2005}, 
  pages = {305--318}, 
  publisher = {Springer}, 
  abstract = {Distributed Hash Tables (DHTs) are very efficient distributed systems for
        routing, but at the same time vulnerable to disruptive nodes. Designers of such
        systems want them used in open networks, where an adversary can perform a sybil
        attack by introducing a large number of corrupt nodes in the network,
        considerably degrading its performance. We introduce a routing strategy that
        alleviates some of the effects of such an attack by making sure that lookups are
        performed using a diverse set of nodes. This ensures that at least some of the
        nodes queried are good, and hence the search makes forward progress. This
        strategy makes use of latent social information present in the introduction graph
        of the network}, 
  www_section = {distributed hash table, routing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.65.3947}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sybildht.pdf}, 
}
Delerablee:2007:IBE:1781454.1781471
@conference{Delerablee:2007:IBE:1781454.1781471,
  title = {Identity-based broadcast encryption with constant size ciphertexts and private
        keys}, 
  author = {Delerabl{\'e}e, C{\'e}cile}, 
  booktitle = {ASIACRYPT 2007--Proceedings of the Advances in Cryptology 13th International
        Conference on Theory and Application of Cryptology and Information Security}, 
  organization = {Springer-Verlag}, 
  year = {2007}, 
  month = {December}, 
  address = {Kuching, Malaysia}, 
  pages = {200--215}, 
  publisher = {Springer-Verlag}, 
  series = {ASIACRYPT'07}, 
  abstract = {This paper describes the first identity-based broadcast encryption scheme
        (IBBE) with constant size ciphertexts and private keys. In our scheme, the public
        key is of size linear in the maximal size m of the set of receivers, which is
        smaller than the number of possible users (identities) in the system. Compared
        with a recent broadcast encryption system introduced by Boneh, Gentry and Waters
        (BGW), our system has comparable properties, but with a better efficiency: the
        public key is shorter than in BGW. Moreover, the total number of possible users
        in the system does not have to be fixed in the setup}, 
  www_section = {ciphertext, encryption, IBBE, private key}, 
  isbn = {3-540-76899-8, 978-3-540-76899-9}, 
  url = {http://dl.acm.org/citation.cfm?id=1781454.1781471}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASIACRYPT\%2707\%20-\%20IBBE\%20with\%20constant\%20size\%20ciphertexts\%20and\%20private\%20keys.pdf},
}
Demers94thebayou
@booklet{Demers94thebayou,
  title = {The Bayou Architecture: Support for Data Sharing among Mobile Users}, 
  author = {Alan Demers and Karin Petersen and Mike Spreitzer and Douglas Terry and Marvin
        Theimer and Brent Welch}, 
  year = {1994}, 
  abstract = {The Bayou System is a platform of replicated, highly-available,
        variable-consistency, mobile databases on which to build collaborative
        applications. This paper presents the preliminary system architecture along with
        the design goals that influenced it. We take a fresh, bottom-up and critical look
        at the requirements of mobile computing applications and carefully pull together
        both new and existing techniques into an overall architecture that meets these
        requirements. Our emphasis is on supporting application-specific conflict
        detection and resolution and on providing application controlled inconsistency}, 
  www_section = {Unsorted}, 
  doi = {10.1109/WMCSA.1994.37}, 
  url = {http://portal.acm.org/citation.cfm?id=1440028}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.40.8955.pdf}, 
}
Department01instrumentingthe
@inproceedings{Department01instrumentingthe,
  title = {Instrumenting The World With Wireless Sensor Networks},
  author = {Deborah Estrin and Gregory J. Pottie and L. Girod and Mani Srivastava},
  booktitle = {International Conference on Acoustics, Speech, and Signal Processing
        (ICASSP 2001)},
  year = {2001},
  pages = {2033--2036},
  abstract = {Pervasive micro-sensing and actuation may revolutionize the way in which we
        understand and manage complex physical systems: from airplane wings to complex
        ecosystems. The capabilities for detailed physical monitoring and manipulation
        offer enormous opportunities for almost every scientific discipline, and it will
        alter the feasible granularity of engineering},
  www_section = {wireless sensor network},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.3027},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.3027.pdf},
}
Deswarte91intrusiontolerance
@inproceedings{Deswarte91intrusiontolerance,
  title = {Intrusion Tolerance in Distributed Computing Systems},
  author = {Yves Deswarte and Laurent Blain and Jean-charles Fabre},
  booktitle = {Proceedings of the IEEE Symposium on Research in Security and Privacy},
  year = {1991},
  pages = {110--121},
  abstract = {An intrusion-tolerant distributed system is a system which is designed so
        that any intrusion into apart of the system will not endanger confidentiality,
        integrity and availability. This approach is suitable for distributed systems,
        because distribution enables isolation of elements so that an intrusion gives
        physical access to only a part of the system. By intrusion, we mean not only
        computer break-ins by non-registered people, but also attempts by registered
        users to exceed or to abuse their privileges. In particular, possible malice of
        security administrators is taken into account. This paper describes how some
        functions of distributed systems can be designed to tolerate intrusions, in
        particular security functions such as user authentication and authorization, and
        application functions such as file management},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.56.9968},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.9968.pdf},
  www_section = {Unsorted},
}
Diaz02
@inproceedings{Diaz02,
  title = {Towards measuring anonymity},
  author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel},
  booktitle = {Proceedings of Privacy Enhancing Technologies Workshop (PET 2002)},
  organization = {Springer-Verlag, LNCS 2482},
  year = {2002},
  month = apr,
  editor = {Roger Dingledine and Paul Syverson},
  publisher = {Springer-Verlag, LNCS 2482},
  abstract = {This paper introduces an information theoretic model that allows to quantify
        the degree of anonymity provided by schemes for anonymous connections. It
        considers attackers that obtain probabilistic information about users. The degree
        is based on the probabilities an attacker, after observing the system, assigns to
        the different users of the system as being the originators of a message. As a
        proof of concept, the model is applied to some existing systems. The model is
        shown to be very useful for evaluating the level of privacy a system provides
        under various attack scenarios, for measuring the amount of information an
        attacker gets with a particular attack and for comparing different systems
        amongst each other},
  www_section = {anonymity, attack, privacy},
  isbn = {978-3-540-00565-0},
  doi = {10.1007/3-540-36467-6},
  url = {http://www.springerlink.com/content/3qb837jkpgukc6b5/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/article-89.pdf},
}
Diaz02towardsmeasuring
@booklet{Diaz02towardsmeasuring,
  title = {Towards Measuring Anonymity},
  author = {Claudia Diaz and Stefaan Seys and Joris Claessens and Bart Preneel},
  year = {2002},
  publisher = {Springer-Verlag},
  abstract = {This paper introduces an information theoretic model that allows to quantify
        the degree of anonymity provided by schemes for anonymous connections. It
        considers attackers that obtain probabilistic information about users. The degree
        is based on the probabilities an attacker, after observing the system, assigns to
        the different users of the system as being the originators of a message. As a proof
        of concept, the model is applied to some existing systems. The model is shown to
        be very useful for evaluating the level of privacy a system provides under
        various attack scenarios, for measuring the amount of information an attacker
        gets with a particular attack and for comparing different systems amongst each
        other},
  url = {http://www.cosic.esat.kuleuven.be/publications/article-89.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/anonimity.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
DiazThesis05
@phdthesis{DiazThesis05,
  title = {Anonymity and Privacy in Electronic Services},
  author = {Claudia Diaz},
  school = {Katholieke Universiteit Leuven},
  year = {2005},
  month = dec,
  address = {Leuven, Belgium},
  type = {phd},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DiazThesis05.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Dimakis:2010:NCD:1861840.1861868
@inproceedings{Dimakis:2010:NCD:1861840.1861868,
  title = {Network coding for distributed storage systems},
  author = {Dimakis, Alexandros G. and Godfrey, Brighten and Wu, Yunnan and Wainwright,
        Martin J. and Ramchandran, Kannan},
  booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer
        Communications},
  organization = {IEEE Press},
  volume = {56},
  year = {2007},
  month = may,
  address = {Anchorage, Alaska, USA},
  pages = {4539--4551},
  publisher = {IEEE Press},
  abstract = {Distributed storage systems provide reliable access to data through
        redundancy spread over individually unreliable nodes. Application scenarios
        include data centers, peer-to-peer storage systems, and storage in wireless
        networks. Storing data using an erasure code, in fragments spread across nodes,
        requires less redundancy than simple replication for the same level of
        reliability. However, since fragments must be periodically replaced as nodes
        fail, a key question is how to generate encoded fragments in a distributed way
        while transferring as little data as possible across the network. For an erasure
        coded system, a common practice to repair from a single node failure is for a new
        node to reconstruct the whole encoded data object to generate just one encoded
        block. We show that this procedure is sub-optimal. We introduce the notion of
        regenerating codes, which allow a new node to communicate functions of the stored
        data from the surviving nodes. We show that regenerating codes can significantly
        reduce the repair bandwidth. Further, we show that there is a fundamental
        tradeoff between storage and repair bandwidth which we theoretically characterize
        using flow arguments on an appropriately constructed graph. By invoking
        constructive results in network coding, we introduce regenerating codes that can
        achieve any point in this optimal tradeoff},
  www_section = {distributed storage, network coding, peer-to-peer storage, Regenerating
        Codes},
  issn = {0018-9448},
  doi = {10.1109/TIT.2010.2054295},
  url = {http://dx.doi.org/10.1109/TIT.2010.2054295},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20Network\%20coding\%20for\%20distributed\%20storage\%20systems.pdf},
}
Dingledine01areputation
@booklet{Dingledine01areputation,
  title = {A Reputation System to Increase {MIX}-net Reliability},
  author = {Roger Dingledine and Michael J. Freedman and David Hopwood and David Molnar},
  year = {2001},
  abstract = {We describe a design for a reputation system that increases the reliability
        and thus efficiency of remailer services. Our reputation system uses a MIX-net in
        which MIXes give receipts for intermediate messages. Together with a set of
        witnesses, these receipts allow senders to verify the correctness of each MIX and
        prove misbehavior to the witnesses},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.7912\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Dingledine02reliablemix
@inproceedings{Dingledine02reliablemix,
  title = {Reliable {MIX} Cascade Networks through Reputation},
  author = {Roger Dingledine and Paul Syverson},
  booktitle = {Financial Cryptography. Springer-Verlag, LNCS 2357},
  organization = {Springer Verlag},
  year = {2002},
  publisher = {Springer Verlag},
  abstract = {We describe a MIX cascade protocol and a reputation system that together
        increase the reliability of a network of MIX cascades. In our protocol, MIX nodes
        periodically generate a communally random seed that, along with their
        reputations, determines cascade configuration},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.9316\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9316.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Dingledine03reputationin
@inproceedings{Dingledine03reputationin,
  title = {Reputation in {P2P} Anonymity Systems},
  author = {Roger Dingledine and Nick Mathewson and Paul Syverson},
  booktitle = {Workshop on Economics of Peer-to-Peer Systems},
  year = {2003},
  abstract = {Decentralized anonymity systems tend to be unreliable, because users must
        choose nodes in the network without knowing the entire state of the network.
        Reputation systems promise to improve reliability by predicting network state. In
        this paper we focus on anonymous remailers and anonymous publishing, explain why
        the systems can benefit from reputation, and describe our experiences designing
        reputation systems for them while still ensuring anonymity. We find that in each
        example we first must redesign the underlying anonymity system to support
        verifiable transactions},
  www_section = {anonymity, P2P, redundancy, remailer},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.4740},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.14.4740.pdf},
}
Dischinger:2008:DBB:1452520.1452523
@inproceedings{Dischinger:2008:DBB:1452520.1452523,
  title = {Detecting {BitTorrent} Blocking},
  author = {Dischinger, Marcel and Mislove, Alan and Haeberlen, Andreas and P. Krishna
        Gummadi},
  booktitle = {IMC'08. Proceedings of the 8th ACM SIGCOMM conference on Internet
        measurement},
  organization = {ACM},
  year = {2008},
  month = oct,
  address = {Vouliagmeni, Greece},
  pages = {3--8},
  publisher = {ACM},
  series = {IMC '08},
  abstract = {Recently, it has been reported that certain access ISPs are surreptitiously
        blocking their customers from uploading data using the popular BitTorrent
        file-sharing protocol. The reports have sparked an intense and wide-ranging
        policy debate on network neutrality and ISP traffic management practices.
        However, to date, end users lack access to measurement tools that can detect
        whether their access ISPs are blocking their BitTorrent traffic. And since ISPs
        do not voluntarily disclose their traffic management policies, no one knows how
        widely BitTorrent traffic blocking is deployed in the current Internet. In this
        paper, we address this problem by designing an easy-to-use tool to detect
        BitTorrent blocking and by presenting results from a widely used public
        deployment of the tool},
  www_section = {BitTorrent, blocking, network measurement},
  isbn = {978-1-60558-334-1},
  doi = {10.1145/1452520.1452523},
  url = {http://doi.acm.org/10.1145/1452520.1452523},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2708\%20-\%20Detecting\%20BitTorrent\%20Blocking.pdf},
}
DistributedSearch2014Hermann
@inproceedings{DistributedSearch2014Hermann,
  title = {Censorship-Resistant and Privacy-Preserving Distributed Web Search},
  author = {Michael Herrmann and Ren Zhang and Kai-Chun Ning and Claudia Diaz},
  booktitle = {IEEE International Conference on Peer to Peer computing},
  year = {2014},
  abstract = {The vast majority of Internet users are relying on centralized search engine
        providers to conduct their web searches. However, search results can be censored
        and search queries can be recorded by these providers without the user's
        knowledge. Distributed web search engines based on peer-to-peer networks have
        been proposed to mitigate these threats. In this paper we analyze the three most
        popular real-world distributed web search engines: Faroo, Seeks and Yacy, with
        respect to their censorship resistance and privacy protection. We show that none
        of them provides an adequate level of protection against an adversary with modest
        resources. Recognizing these flaws, we identify security properties a
        censorship-resistant and privacy-preserving distributed web search engine should
        provide. We propose two novel defense mechanisms called node density protocol and
        webpage verification protocol to achieve censorship resistance and show their
        effectiveness and feasibility with simulations. Finally, we elaborate on how
        state-of-the-art defense mechanisms achieve privacy protection in distributed web
        search engines},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributedSearch2014Hermann.pdf},
  www_section = {Unsorted},
  url = {https://bibliography.gnunet.org},
}
Douceur01competitivehill-climbing
@inproceedings{Douceur01competitivehill-climbing,
  title = {Competitive Hill-Climbing Strategies for Replica Placement in a Distributed File
        System},
  author = {John R. Douceur and Roger Wattenhofer},
  booktitle = {DISC},
  year = {2001},
  pages = {48--62},
  abstract = {The Farsite distributed file system stores multiple replicas of files on
        multiple machines, to provide file access even when some machines are
        unavailable. Farsite assigns file replicas to machines so as to maximally exploit
        the different degrees of availability of different machines, given an allowable
        replication factor R. We use competitive analysis and simulation to study the
        performance of three candidate hillclimbing replica placement strategies, MinMax,
        MinRand, and RandRand, each of which successively exchanges the locations of two
        file replicas. We show that the MinRand and RandRand strategies are perfectly
        competitive for R = 2 and 2/3-competitive for R = 3. For general R, MinRand is at
        least 1/2-competitive and RandRand is at least 10/17-competitive. The MinMax
        strategy is not competitive. Simulation results show better performance than the
        theoretic worst-case bounds},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.6802\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hill\%20climbing.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Douceur:2002:RSD:850928.851884
@inproceedings{Douceur:2002:RSD:850928.851884,
  title = {Reclaiming Space from Duplicate Files in a Serverless Distributed File System},
  author = {John R. Douceur and Adya, Atul and Bolosky, William J. and Simon, Dan and
        Marvin Theimer},
  booktitle = {ICDCS'02--Proceedings of the 22nd International Conference on Distributed
        Computing Systems (ICDCS'02)},
  organization = {IEEE Computer Society},
  year = {2002},
  month = jul,
  address = {Vienna, Austria},
  pages = {0--617},
  publisher = {IEEE Computer Society},
  series = {ICDCS '02},
  abstract = {The Farsite distributed file system provides availability by replicating each
        file onto multiple desktop computers. Since this replication consumes significant
        storage space, it is important to reclaim used space where possible. Measurement
        of over 500 desktop file systems shows that nearly half of all consumed space is
        occupied by duplicate files. We present a mechanism to reclaim space from this
        incidental duplication to make it available for controlled file replication. Our
        mechanism includes: (1) convergent encryption, which enables duplicate files to
        be coalesced into the space of a single file, even if the files are encrypted
        with different users' keys; and (2) SALAD, a Self-Arranging Lossy Associative
        Database for aggregating file content and location information in a
        decentralized, scalable, fault-tolerant manner. Large-scale simulation
        experiments show that the duplicate-file coalescing system is scalable, highly
        effective, and fault-tolerant},
  www_section = {convergent encryption, distributed file system, duplicate files, farsite,
        SALAD, serverless},
  isbn = {0-7695-1585-1},
  doi = {10.1109/ICDCS.2002.1022312},
  url = {http://dl.acm.org/citation.cfm?id=850928.851884},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2702\%20-\%20Reclaiming\%20space\%20for\%20duplicate\%20files.pdf},
}
Douceur:2002:SA:646334.687813
@inproceedings{Douceur:2002:SA:646334.687813,
  title = {The {Sybil} Attack},
  author = {John R. Douceur},
  booktitle = {IPTPS'01--Revised Papers from the First International Workshop on
        Peer-to-Peer Systems},
  organization = {Springer-Verlag London},
  year = {2002},
  month = mar,
  address = {Cambridge, MA},
  pages = {251--260},
  publisher = {Springer-Verlag London},
  series = {Revised Papers from the First International Workshop on Peer-to-Peer Systems},
  abstract = {Large-scale peer-to-peer systems face security threats from faulty or hostile
        remote computing elements. To resist these threats, many such systems employ
        redundancy. However, if a single faulty entity can present multiple identities,
        it can control a substantial fraction of the system, thereby undermining this
        redundancy. One approach to preventing these "Sybil attacks" is to have a trusted
        agency certify identities. This paper shows that, without a logically centralized
        authority, Sybil attacks are always possible except under extreme and unrealistic
        assumptions of resource parity and coordination among entities},
  www_section = {attack, peer-to-peer networking, security threat, Sybil attack},
  isbn = {3-540-44179-4},
  url = {http://dl.acm.org/citation.cfm?id=646334.687813},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2702\%20-\%20Douceur\%20-\%20The\%20Sybil\%20Attack.pdf},
}
Douceur:2002:SDS:784592.784803
@inproceedings{Douceur:2002:SDS:784592.784803,
  title = {A Secure Directory Service based on Exclusive Encryption},
  author = {John R. Douceur and Adya, Atul and Benaloh, Josh and Bolosky, William J. and
        Yuval, Gideon},
  booktitle = {ACSAC'02--Proceedings of the 18th Annual Computer Security Applications
        Conference},
  organization = {IEEE Computer Society},
  year = {2002},
  month = dec,
  address = {San Diego, CA, USA},
  pages = {0--172},
  publisher = {IEEE Computer Society},
  series = {ACSAC '02},
  abstract = {We describe the design of a Windows file-system directory service that
        ensures the persistence, integrity, privacy, syntactic legality, and
        case-insensitive uniqueness of the names it indexes. Byzantine state replication
        provides persistence and integrity, and encryption imparts privacy. To enforce
        Windows' baroque name syntax--including restrictions on allowable characters, on
        the terminal character, and on several specific names--we develop a cryptographic
        process, called "exclusive encryption," that inherently excludes syntactically
        illegal names and that enables the exclusion of case-insensitively duplicate
        names without access to their plaintext. This process excludes entire names by
        mapping the set of allowed strings to the set of all strings, excludes certain
        characters through an amended prefix encoding, excludes terminal characters
        through varying the prefix coding by character index, and supports
        case-insensitive comparison of names by extracting and encrypting case
        information separately. We also address the issues of hiding name-length
        information and access-authorization information, and we report a newly
        discovered problem with enforcing case-insensitive uniqueness for Unicode names},
  www_section = {directory service, encryption, exclusive encryption, Windows},
  isbn = {0-7695-1828-1},
  doi = {10.1109/CSAC.2002.1176289},
  url = {http://dl.acm.org/citation.cfm?id=784592.784803},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2702\%20-\%20A\%20secure\%20directory\%20service\%20based\%20on\%20exclusive\%20encryption.pdf},
}
Douglas-thesis
@phdthesis{Douglas-thesis,
  title = {A taxonomy for and analysis of anonymous communications networks},
  author = {Douglas Kelly},
  school = {Air Force Institute of Technology},
  year = {2009},
  month = mar,
  type = {phd},
  abstract = {Any entity operating in cyberspace is susceptible to debilitating attacks.
        With cyber attacks intended to gather intelligence and disrupt communications
        rapidly replacing the threat of conventional and nuclear attacks, a new age of
        warfare is at hand. In 2003, the United States acknowledged that the speed and
        anonymity of cyber attacks makes distinguishing among the actions of terrorists,
        criminals, and nation states difficult. Even President Obama's Cybersecurity
        Chief-elect feels challenged by the increasing sophistication of cyber attacks.
        Indeed, the rising quantity and ubiquity of new surveillance technologies in
        cyberspace enables instant, undetectable, and unsolicited information collection
        about entities. Hence, anonymity and privacy are becoming increasingly important
        issues. Anonymization enables entities to protect their data and systems from a
        diverse set of cyber attacks and preserve privacy. This research provides a
        systematic analysis of anonymity degradation, preservation and elimination in
        cyberspace to enhance the security of information assets. This includes
        discovery/obfuscation of identities and actions of/from potential adversaries.
        First, novel taxonomies are developed for classifying and comparing the wide
        variety of well-established and state-of-the-art anonymous networking protocols.
        These expand the classical definition of anonymity and are the first known to
        capture the peer-to-peer and mobile ad hoc anonymous protocol family
        relationships. Second, a unique synthesis of state-of-the-art anonymity metrics
        is provided. This significantly aids an entities ability to reliably measure
        changing anonymity levels; thereby, increasing their ability to defend against
        cyber attacks. Finally, a novel epistemic-based model is created to characterize
        how an adversary reasons with knowledge to degrade anonymity},
  url = {http://oai.dtic.mil/oai/oai?verb=getRecord\&metadataPrefix=html\&identifier=ADA495688},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Douglas-thesis.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Druschel01past:a
@inproceedings{Druschel01past:a,
  title = {{PAST}: A large-scale, persistent peer-to-peer storage utility},
  author = {Peter Druschel and Antony Rowstron},
  booktitle = {HotOS VIII},
  year = {2001},
  pages = {75--80},
  abstract = {This paper sketches the design of PAST, a large-scale, Internet-based, global
        storage utility that provides scalability, high availability, persistence and
        security. PAST is a peer-to-peer Internet application and is entirely
        selforganizing. PAST nodes serve as access points for clients, participate in the
        routing of client requests, and contribute storage to the system. Nodes are not
        trusted, they may join the system at any time and may silently leave the system
        without warning. Yet, the system is able to provide strong assurances, efficient
        storage access, load balancing and scalability},
  www_section = {peer-to-peer storage},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.1.1674},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.1.1674.pdf},
}
Duminuco:2009:PSR:1584339.1584602
@inproceedings{Duminuco:2009:PSR:1584339.1584602,
  title = {A Practical Study of Regenerating Codes for Peer-to-Peer Backup Systems},
  author = {Alessandro Duminuco and E W Biersack},
  booktitle = {ICDCS'09--Proceedings of the 29th IEEE International Conference on
        Distributed Computing Systems},
  organization = {IEEE Computer Society},
  year = {2009},
  month = jun,
  address = {Montreal, Qu{\'e}bec, Canada},
  pages = {376--384},
  publisher = {IEEE Computer Society},
  series = {ICDCS '09},
  abstract = {In distributed storage systems, erasure codes represent an attractive
        solution to add redundancy to stored data while limiting the storage overhead.
        They are able to provide the same reliability as replication requiring much less
        storage space. Erasure coding breaks the data into pieces that are encoded and
        then stored on different nodes. However, when storage nodes permanently abandon
        the system, new redundant pieces must be created. For erasure codes, generating a
        new piece requires the transmission of k pieces over the network, resulting in a
        k times higher reconstruction traffic as compared to replication. Dimakis
        proposed a new class of codes, called Regenerating Codes, which are able to
        provide both the storage efficiency of erasure codes and the communication
        efficiency of replication. However, Dimakis gave only a theoretical description
        of the codes without discussing implementation issues or computational costs. We
        have done a real implementation of Random Linear Regenerating Codes that allows
        us to measure their computational cost, which can be significant if the
        parameters are not chosen properly. However, we also find that there exist
        parameter values that result in a significant reduction of the communication
        overhead at the expense of a small increase in storage cost and computation,
        which makes these codes very attractive for distributed storage systems},
  www_section = {Backup Systems, erasure codes, evaluation, peer-to-peer networking,
        Regenerating Codes, storage},
  isbn = {978-0-7695-3659-0},
  doi = {10.1109/ICDCS.2009.14},
  url = {http://dx.doi.org/10.1109/ICDCS.2009.14},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCS\%2709\%20-\%20Regenerating\%20codes\%20for\%20p2p\%20backup\%20systems.pdf},
}
Dwork2007
@inproceedings{Dwork2007,
  title = {The Price of Privacy and the Limits of {LP} Decoding},
  author = {Cynthia Dwork and Frank D. McSherry and Kunal Talwar},
  booktitle = {The Price of Privacy and the Limits of LP Decoding},
  year = {2007},
  pages = {85--94},
%%%%% ERROR: Missing field
% www_section = {?????},
}
EURECOM+2885
@article{EURECOM+2885,
  title = {Long term study of peer behavior in the {KAD} {DHT}},
  author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack},
  journal = {IEEE/ACM Transactions on Networking},
  volume = {17},
  year = {2009},
  month = may,
  chapter = {1371},
  abstract = {Distributed hash tables (DHTs) have been actively studied in literature and
        many different proposals have been made on how to organize peers in a DHT.
        However, very few DHTs have been implemented in real systems and deployed on a
        large scale. One exception is KAD, a DHT based on Kademlia, which is part of
        eDonkey, a peer-to-peer file sharing system with several million simultaneous
        users. We have been crawling a representative subset of KAD every five minutes
        for six months and obtained information about geographical distribution of peers,
        session times, daily usage, and peer lifetime. We have found that session times
        are Weibull distributed and we show how this information can be exploited to make
        the publishing mechanism much more efficient. Peers are identified by the
        so-called KAD ID, which up to now was assumed to be persistent. However, we
        observed that a fraction of peers changes their KAD ID as frequently as once a
        session. This change of KAD IDs makes it difficult to characterize end-user
        behavior. For this reason we have been crawling the entire KAD network once a day
        for more than a year to track end-users with static IP addresses, which allows us
        to estimate end-user lifetime and the fraction of end-users changing their KAD
        ID},
  www_section = {churn, distributed hash table, KAD, Kademlia},
  issn = {1063-6692},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Long\%20Term\%20Study\%20of\%20Peer\%20Behavior\%20in\%20the\%20kad\%20DHT.pdf},
  url = {https://bibliography.gnunet.org},
}
EdmanSY07
@article{EdmanSY07,
  title = {A Combinatorial Approach to Measuring Anonymity},
  author = {Matthew Edman and Fikret Sivrikaya and B{\"u}lent Yener},
  journal = {Intelligence and Security Informatics, 2007 IEEE},
  year = {2007},
  month = may,
  pages = {356--363},
  abstract = {In this paper we define a new metric for quantifying the degree of anonymity
        collectively afforded to users of an anonymous communication system. We show how
        our metric, based on the permanent of a matrix, can be useful in evaluating the
        amount of information needed by an observer to reveal the communication pattern
        as a whole. We also show how our model can be extended to include probabilistic
        information learned by an attacker about possible sender-recipient relationships.
        Our work is intended to serve as a complementary tool to existing
        information-theoretic metrics, which typically consider the anonymity of the
        system from the perspective of a single user or message},
  www_section = {anonymity},
  isbn = {142441329X},
  doi = {10.1109/ISI.2007.379497},
  url = {http://www.mendeley.com/research/a-combinatorial-approach-to-measuring-anonymity/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EdmanSY07.pdf},
}
ElGamal:1985:PKC:19478.19480
% Fixed: stray trailing "s" in the ACM DL url (citation.cfm?id=19478.19480) which
% broke the link; the DL id matches the citation key's numeric suffix.
@conference{ElGamal:1985:PKC:19478.19480,
  title = {A Public Key Cryptosystem and a Signature Scheme Based on Discrete Logarithms}, 
  author = {El Gamal, Taher}, 
  booktitle = {Proceedings of CRYPTO 84 on Advances in cryptology}, 
  organization = {Springer-Verlag New York, Inc}, 
  year = {1985}, 
  month = {January}, 
  address = {Santa Barbara, California}, 
  pages = {10--18}, 
  publisher = {Springer-Verlag New York, Inc}, 
  abstract = {A new signature scheme is proposed together with an implementation of the
        Diffie--Hellman key distribution scheme that achieves a public key cryptosystem.
        The security of both systems relies on the difficulty of computing discrete
        logarithms over finite fields}, 
  www_section = {cryptosystem, discrete logarithms, public key, signature scheme}, 
  isbn = {0-387-15658-5}, 
  url = {http://dl.acm.org/citation.cfm?id=19478.19480}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2784\%20-\%20El\%20Gamal\%20-\%20Public\%20Key\%20Cryptosystem.pdf},
}
Electrical04designingincentives
% Fixed: the isbn and issn values were swapped. 0743-166X is the ISSN of the IEEE
% INFOCOM proceedings series (8-digit ISSN format); 0-7803-8968-9 is an ISBN-10.
@conference{Electrical04designingincentives,
  title = {Designing Incentives for Peer-to-Peer Routing}, 
  author = {Alberto Blanc and Yi-Kai Liu and Vahdat, Amin}, 
  booktitle = {INFOCOM 2005, 24th Annual Joint Conference of the IEEE Computer and
        Communications Societies}, 
  organization = {IEEE Computer Society}, 
  volume = {1}, 
  year = {2005}, 
  month = {March}, 
  address = {Miami, FL, USA}, 
  pages = {374--385}, 
  publisher = {IEEE Computer Society}, 
  abstract = {In a peer-to-peer network, nodes are typically required to route packets for
        each other. This leads to a problem of "free-loaders", nodes that use the network
        but refuse to route other nodes' packets. In this paper we study ways of
        designing incentives to discourage free-loading. We model the interactions
        between nodes as a "random matching game", and describe a simple reputation
        system that provides incentives for good behavior. Under certain assumptions, we
        obtain a stable subgame-perfect equilibrium. We use simulations to investigate
        the robustness of this scheme in the presence of noise and malicious nodes, and
        we examine some of the design trade-offs. We also evaluate some possible
        adversarial strategies, and discuss how our results might apply to real
        peer-to-peer systems}, 
  www_section = {economics, free-loader, free-loading, peer-to-peer networking, system
        design}, 
  isbn = {0-7803-8968-9}, 
  issn = {0743-166X}, 
  doi = {10.1109/INFCOM.2005.1497907}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Designing\%20incentives\%20for\%20peer-to-peer\%20routing.pdf},
}
Eppstein:2011:WDE:2018436.2018462
% SIGCOMM'11 paper on set reconciliation via difference digests / invertible Bloom
% filters; tagged as GNUnet-relevant. Record is complete (bare doi, isbn, pdf
% mirror) except that no abstract was captured.
@conference{Eppstein:2011:WDE:2018436.2018462,
  title = {What's the difference?: efficient set reconciliation without prior context}, 
  author = {Eppstein, David and Goodrich, Michael T. and Uyeda, Frank and Varghese,
        George}, 
  booktitle = {Proceedings of the ACM SIGCOMM 2011 conference}, 
  organization = {ACM}, 
  year = {2011}, 
  address = {New York, NY, USA}, 
  pages = {218--229}, 
  publisher = {ACM}, 
  series = {SIGCOMM '11}, 
  www_section = {difference digest, GNUnet, invertible bloom filter, set difference}, 
  isbn = {978-1-4503-0797-0}, 
  doi = {10.1145/2018436.2018462}, 
  url = {http://doi.acm.org/10.1145/2018436.2018462}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EppGooUye-SIGCOMM-11.pdf},
}
Eschenauer02akey-management
% Dropped the redundant "In " prefix from booktitle; citation styles supply the
% "In" connective themselves.
% NOTE(review): the www_section tag "DNS" looks like a mis-tag for a sensor-network
% key-management paper -- verify against the site taxonomy before changing it.
@conference{Eschenauer02akey-management,
  title = {A Key-Management Scheme for Distributed Sensor Networks}, 
  author = {Laurent Eschenauer and Virgil D. Gligor}, 
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications
        Security}, 
  organization = {ACM Press}, 
  year = {2002}, 
  pages = {41--47}, 
  publisher = {ACM Press}, 
  abstract = {Distributed Sensor Networks (DSNs) are ad-hoc mobile networks that include
        sensor nodes with limited computation and communication capabilities. DSNs are
        dynamic in the sense that they allow addition and deletion of sensor nodes after
        deployment to grow the network or replace failing and unreliable nodes. DSNs may
        be deployed in hostile areas where communication is monitored and nodes are
        subject to capture and surreptitious use by an adversary. Hence DSNs require
        cryptographic protection of communications, sensorcapture detection, key
        revocation and sensor disabling. In this paper, we present a key-management
        scheme designed to satisfy both operational and security requirements of DSNs}, 
  www_section = {DNS, mobile Ad-hoc networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.9193}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.19.9193.pdf}, 
}
Eugster:2003:LPB:945506.945507
% Expanded the abbreviated journal name "ACM Trans. Comput. Syst" to its full form,
% matching the full venue names used elsewhere in this database (e.g. "SIGCOMM
% Computer Communication Review" in the Fragouli entry).
@article{Eugster:2003:LPB:945506.945507,
  title = {Lightweight probabilistic broadcast}, 
  author = {Patrick Eugster and Rachid Guerraoui and Sidath B. Handurukande and Petr
        Kouznetsov and Anne-Marie Kermarrec}, 
  journal = {ACM Transactions on Computer Systems}, 
  volume = {21}, 
  year = {2003}, 
  month = {November}, 
  address = {New York, NY, USA}, 
  pages = {341--374}, 
  publisher = {ACM}, 
  www_section = {Broadcast, buffering, garbage collection, gossip, noise, randomization,
        reliability, scalability}, 
  issn = {0734-2071}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lightweight_prob_broadcast.pdf},
}
Fakult04peerstore:better
% PeerStore P2P backup paper, kept as @booklet: only author/title/year are recorded,
% with no venue fields -- presumably a technical report (verify if a venue exists).
@booklet{Fakult04peerstore:better,
  title = {PeerStore: Better Performance by Relaxing in Peer-to-Peer Backup}, 
  author = {Martin Landers and Han Zhang and Kian-Lee Tan}, 
  year = {2004}, 
  abstract = {Backup is cumbersome. To be effective, backups have to be made at regular
        intervals, forcing users to organize and store a growing collection of backup
        media. In this paper we propose a novel Peer-to-Peer backup system, PeerStore,
        that allows the user to store his backups on other people's computers instead.
        PeerStore is an adaptive, cost-effective system suitable for all types of
        networks ranging from LAN, WAN to large unstable networks like the Internet. The
        system consists of two layers: metadata layer and symmetric trading layer.
        Locating blocks and duplicate checking is accomplished by the metadata layer
        while the actual data distribution is done between pairs of peers after they have
        established a symmetric data trade. By decoupling the metadata management from
        data storage, the system offers a significant reduction of the maintenance cost
        and preserves fairness among peers. Results show that PeerStore has a reduced
        maintenance cost comparing to pStore. PeerStore also realizes fairness because of
        the symmetric nature of the trades}, 
  www_section = {backup, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.8067}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/peerstore-better-performance-by.pdf},
}
Fargier:1996:MCS:1892875.1892901
% Fixed typo in the www_section tag list: "imcomplete knowledge" -> "incomplete
% knowledge" (matches the spelling used in the title and abstract).
@conference{Fargier:1996:MCS:1892875.1892901,
  title = {Mixed constraint satisfaction: a framework for decision problems under
        incomplete knowledge}, 
  author = {Fargier, H{\'e}l{\`e}ne and Lang, J{\'e}r{\^o}me and Schiex, Thomas}, 
  booktitle = {AAAI'96--Proceedings of the 13th National Conference on Artificial
        Intelligence}, 
  organization = {AAAI Press}, 
  year = {1996}, 
  month = {August}, 
  address = {Portland, OR, United States}, 
  pages = {175--180}, 
  publisher = {AAAI Press}, 
  series = {AAAI'96}, 
  abstract = {Constraint satisfaction is a powerful tool for representing and solving
        decision problems with complete knowledge about the world. We extend the CSP
        framework so as to represent decision problems under incomplete knowledge. The
        basis of the extension consists in a distinction between controllable and
        uncontrollable variables -- hence the terminology "mixed CSP" -- and a "solution"
        gives actually a conditional decision. We study the complexity of deciding the
        consistency of a mixed CSP. As the problem is generally intractable, we propose
        an algorithm for finding an approximate solution}, 
  www_section = {algorithms, constraint satisfaction, decision problem, framework,
        incomplete knowledge, mixed CSP}, 
  isbn = {0-262-51091-X}, 
  url = {http://dl.acm.org/citation.cfm?id=1892875.1892901}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAAI\%2796\%20-\%20Mixed\%20constraint\%20satisfaction.pdf},
}
Feamster02infranet:circumventing
% Fixed: organization and publisher were truncated to just "Association"; the 11th
% USENIX Security Symposium proceedings are published by the USENIX Association.
% Also dropped the redundant "In " prefix from booktitle (styles supply it).
% The generator's missing-www_section marker below is kept; topic tags still TODO.
@conference{Feamster02infranet:circumventing,
  title = {Infranet: Circumventing Web Censorship and Surveillance}, 
  author = {Nick Feamster and Magdalena Balazinska and Greg Harfst and Hari Balakrishnan
        and David Karger}, 
  booktitle = {Proceedings of the 11th USENIX Security Symposium}, 
  organization = {USENIX Association}, 
  year = {2002}, 
  pages = {247--262}, 
  publisher = {USENIX Association}, 
  abstract = {An increasing number of countries and companies routinely block or monitor
        access to parts of the Internet. To counteract these measures, we propose
        Infranet, a system that enables clients to surreptitiously retrieve sensitive
        content via cooperating Web servers distributed across the global Internet. These
        Infranet servers provide clients access to censored sites while continuing to
        host normal uncensored content. Infranet uses a tunnel protocol that provides a
        covert communication channel between its clients and servers, modulated over
        standard HTTP transactions that resemble innocuous Web browsing. In the upstream
        direction, Infranet clients send covert messages to Infranet servers by
        associating meaning to the sequence of HTTP requests being made. In the
        downstream direction, Infranet servers return content by hiding censored data in
        uncensored images using steganographic techniques. We describe the design, a
        prototype implementation, security properties, and performance of Infranet. Our
        security analysis shows that Infranet can successfully circumvent several
        sophisticated censoring techniques}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.18.5049\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Feigenbaum:2002:DAM:570810.570812
% Fixed the booktitle prefix: this entry's year = 2002 and series = {DIALM '02},
% and sibling entries use a year-matching prefix (cf. "EC'06. Proceedings of the
% 7th ..."), so "DIALM'06" was wrong -- the 6th DIALM workshop is DIALM'02.
% Also normalized doi to the bare identifier (resolver link remains in the DL urls).
@conference{Feigenbaum:2002:DAM:570810.570812,
  title = {Distributed algorithmic mechanism design: recent results and future directions}, 
  author = {Feigenbaum, Joan and S Shenker}, 
  booktitle = {DIALM'02. Proceedings of the 6th international workshop on Discrete
        algorithms and methods for mobile computing and communications}, 
  organization = {ACM}, 
  year = {2002}, 
  month = {September}, 
  address = {Atlanta, Georgia}, 
  pages = {1--13}, 
  publisher = {ACM}, 
  series = {DIALM '02}, 
  abstract = {Distributed Algorithmic Mechanism Design (DAMD) combines theoretical computer
        science's traditional focus on computational tractability with its more recent
        interest in incentive compatibility and distributed computing. The Internet's
        decentralized nature, in which distributed computation and autonomous agents
        prevail, makes DAMD a very natural approach for many Internet problems. This
        paper first outlines the basics of DAMD and then reviews previous DAMD results on
        multicast cost sharing and interdomain routing. The remainder of the paper
        describes several promising research directions and poses some specific open
        problems}, 
  www_section = {algorithmic mechanism design, algorithms, distributed computation,
        multicast, routing}, 
  isbn = {1-58113-587-4}, 
  doi = {10.1145/570810.570812}, 
  url = {http://jmvidal.cse.sc.edu/library/feigenbaum02a.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DIALM\%2702\%20-\%20Feigenbaum\%20\%26\%20Shenker\%20-\%20Distributed\%20algorithmic\%20mechanism\%20design.pdf},
}
Feigenbaum:2006:IIR:1134707.1134722
% Fixed the address: EC'06 was held in Ann Arbor, Michigan -- the leading "Ann"
% had been lost. Also normalized doi to the bare identifier; the http resolver
% form is preserved verbatim in the url field.
@conference{Feigenbaum:2006:IIR:1134707.1134722,
  title = {Incentive-compatible interdomain routing}, 
  author = {Feigenbaum, Joan and Ramachandran, Vijay and Schapira, Michael}, 
  booktitle = {EC'06. Proceedings of the 7th ACM Conference on Electronic Commerce}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {June}, 
  address = {Ann Arbor, Michigan}, 
  pages = {130--139}, 
  publisher = {ACM}, 
  series = {EC '06}, 
  abstract = {The routing of traffic between Internet domains, or Autonomous Systems
        (ASes), a task known as interdomain routing, is currently handled by the Border
        Gateway Protocol (BGP). Using BGP, autonomous systems can apply semantically rich
        routing policies to choose interdomain routes in a distributed fashion. This
        expressiveness in routing-policy choice supports domains' autonomy in network
        operations and in business decisions, but it comes at a price: The interaction of
        locally defined routing policies can lead to unexpected global anomalies,
        including route oscillations or overall protocol divergence. Networking
        researchers have addressed this problem by devising constraints on policies that
        guarantee BGP convergence without unduly limiting expressiveness and autonomy.In
        addition to taking this engineering or "protocol-design" approach, researchers
        have approached interdomain routing from an economic or "mechanism-design" point
        of view. It is known that lowest-cost-path (LCP) routing can be implemented in a
        truthful, BGP-compatible manner but that several other natural classes of routing
        policies cannot. In this paper, we present a natural class of interdomain-routing
        policies that is more realistic than LCP routing and admits incentive-compatible,
        BGP-compatible implementation. We also present several positive steps toward a
        general theory of incentive-compatible interdomain routing}, 
  www_section = {border gateway protocol (BGP), distributed algorithmic mechanism design,
        interdomain routing}, 
  isbn = {1-59593-236-4}, 
  doi = {10.1145/1134707.1134722}, 
  url = {http://doi.acm.org/10.1145/1134707.1134722}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2706\%20-\%20Incentive-compatible\%20interdomain\%20routing.pdf},
}
Feldman03quantifyingdisincentives
% Workshop on Economics of Peer-to-Peer Systems 2003 (Berkeley). No pages or
% publisher are recorded -- presumably an informal workshop without archival
% proceedings; verify before adding fields.
@conference{Feldman03quantifyingdisincentives,
  title = {Quantifying Disincentives in Peer-to-Peer Networks}, 
  author = {Michal Feldman and Kevin Lai and John Chuang and Ion Stoica}, 
  booktitle = {Workshop on Economics of Peer-to-Peer Systems}, 
  year = {2003}, 
  month = {June}, 
  address = {Berkeley, CA}, 
  abstract = {In this paper, we use modeling and simulation to better understand the
        effects of cooperation on user performance and to quantify the performance-based
        disincentives in a peer-to-peer file sharing system. This is the first step
        towards building an incentive system. For the models developed in this paper, we
        have the following results: Although performance improves significantly when
        cooperation increases from low to moderate levels, the improvement diminishes
        thereafter. In particular, the mean delay to download a file when 5\% of the
        nodes share files is 8x more than when 40\% of the nodes share files, while the
        mean download delay when 40\% of the nodes share is only 1.75x more than when
        100\% share}, 
  www_section = {incentives, peer-to-peer networking}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Feldman\%2C\%20Lai\%2C\%20Chuang\%20\%26\%20Stoica\%20-\%20Quantifying\%20disincentives\%20in\%20peer-to-peer\%20networks.pdf},
}
Feldman:2004:FWP:1016527.1016539
% Normalized doi to the bare identifier (no resolver prefix), matching the bare-DOI
% form used elsewhere in this database; the resolver link is kept verbatim in url.
@conference{Feldman:2004:FWP:1016527.1016539,
  title = {Free-riding and whitewashing in peer-to-peer systems}, 
  author = {Michal Feldman and Papadimitriou, Christos and John Chuang and Ion Stoica}, 
  booktitle = {PINS'04. Proceedings of the ACM SIGCOMM Workshop on Practice and Theory of
        Incentives in Networked Systems}, 
  organization = {ACM}, 
  year = {2004}, 
  month = {August}, 
  address = {Portland, OR}, 
  pages = {228--236}, 
  publisher = {ACM}, 
  series = {PINS '04}, 
  abstract = {We develop a model to study the phenomenon of free-riding in peer-to-peer
        (P2P) systems. At the heart of our model is a user of a certain type, an
        intrinsic and private parameter that reflects the user's willingness to
        contribute resources to the system. A user decides whether to contribute or
        free-ride based on how the current contribution cost in the system compares to
        her type. When the societal generosity (i.e., the average type) is low,
        intervention is required in order to sustain the system. We present the effect of
        mechanisms that exclude low type users or, more realistic, penalize free-riders
        with degraded service. We also consider dynamic scenarios with arrivals and
        departures of users, and with whitewashers: users who leave the system and rejoin
        with new identities to avoid reputational penalties. We find that when penalty is
        imposed on all newcomers in order to avoid whitewashing, system performance
        degrades significantly only when the turnover rate among users is high}, 
  www_section = {cheap pseudonyms, cooperation, equilibrium, exclusion, free-riding,
        identity cost, incentives, peer-to-peer networking, whitewashing}, 
  isbn = {1-58113-942-X}, 
  doi = {10.1145/1016527.1016539}, 
  url = {http://doi.acm.org/10.1145/1016527.1016539}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PINS\%2704\%20-\%20\%20Free-riding\%20and\%20whitewashing\%20in\%20P2P\%20systems.pdf},
}
Feldman:2004:RIT:988772.988788
% Normalized doi to the bare identifier (no resolver prefix), matching the bare-DOI
% form used elsewhere in this database; the resolver link is kept verbatim in url.
@conference{Feldman:2004:RIT:988772.988788,
  title = {Robust incentive techniques for peer-to-peer networks}, 
  author = {Michal Feldman and Kevin Lai and Ion Stoica and John Chuang}, 
  booktitle = {EC'04. Proceedings of the 5th ACM Conference on Electronic Commerce}, 
  organization = {ACM}, 
  year = {2004}, 
  month = {May}, 
  address = {New York, NY, USA}, 
  pages = {102--111}, 
  publisher = {ACM}, 
  series = {EC '04}, 
  abstract = {Lack of cooperation (free riding) is one of the key problems that confronts
        today's P2P systems. What makes this problem particularly difficult is the unique
        set of challenges that P2P systems pose: large populations, high turnover, a
        symmetry of interest, collusion, zero-cost identities, and traitors. To tackle
        these challenges we model the P2P system using the Generalized Prisoner's Dilemma
        (GPD),and propose the Reciprocative decision function as the basis of a family of
        incentives techniques. These techniques are fullydistributed and include:
        discriminating server selection, maxflow-based subjective reputation, and
        adaptive stranger policies. Through simulation, we show that these techniques can
        drive a system of strategic users to nearly optimal levels of cooperation}, 
  www_section = {cheap pseudonyms, collusion, free-riding, incentives, peer-to-peer
        networking, prisoners dilemma, reputation, whitewash, whitewashing}, 
  isbn = {1-58113-771-0}, 
  doi = {10.1145/988772.988788}, 
  url = {http://doi.acm.org/10.1145/988772.988788}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2704\%20-\%20Robust\%20incentive\%20techniques\%20for\%20P2P\%20networks.pdf},
}
Feldman:2005:HMR:1064009.1064022
% Normalized doi to the bare identifier (no resolver prefix), matching the bare-DOI
% form used elsewhere in this database; the resolver link is kept verbatim in url.
@conference{Feldman:2005:HMR:1064009.1064022,
  title = {Hidden-action in multi-hop routing}, 
  author = {Michal Feldman and John Chuang and Ion Stoica and S Shenker}, 
  booktitle = {EC'05. Proceedings of the 6th ACM Conference on Electronic Commerce}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {June}, 
  address = {Vancouver, Canada}, 
  pages = {117--126}, 
  publisher = {ACM}, 
  series = {EC '05}, 
  abstract = {In multi-hop networks, the actions taken by individual intermediate nodes are
        typically hidden from the communicating endpoints; all the endpoints can observe
        is whether or not the end-to-end transmission was successful. Therefore, in the
        absence of incentives to the contrary, rational (i.e., selfish) intermediate
        nodes may choose to forward packets at a low priority or simply not forward
        packets at all. Using a principal-agent model, we show how the hidden-action
        problem can be overcome through appropriate design of contracts, in both the
        direct (the endpoints contract with each individual router) and recursive (each
        router contracts with the next downstream router) cases. We further demonstrate
        that per-hop monitoring does not necessarily improve the utility of the principal
        or the social welfare in the system. In addition, we generalize existing
        mechanisms that deal with hidden-information to handle scenarios involving both
        hidden-information and hidden-action}, 
  www_section = {contracts, hidden-action, incentives, mechanism design, moral-hazard,
        multi-hop, principal-agent model, routing}, 
  isbn = {1-59593-049-3}, 
  doi = {10.1145/1064009.1064022}, 
  url = {http://doi.acm.org/10.1145/1064009.1064022}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2705\%20-\%20Hidden-action\%20in\%20multi-hop\%20routing.pdf},
}
Feldman:2005:OFB:1120717.1120723
% Normalized doi to the bare identifier (no resolver prefix), matching the bare-DOI
% form used elsewhere in this database; the resolver link is kept verbatim in url.
@article{Feldman:2005:OFB:1120717.1120723,
  title = {Overcoming free-riding behavior in peer-to-peer systems}, 
  author = {Michal Feldman and John Chuang}, 
  journal = {ACM SIGecom Exchanges}, 
  volume = {5}, 
  year = {2005}, 
  month = {July}, 
  address = {New York, NY, USA}, 
  pages = {41--50}, 
  publisher = {ACM}, 
  abstract = {While the fundamental premise of peer-to-peer (P2P) systems is that of
        voluntary resource sharing among individual peers, there is an inherent tension
        between individual rationality and collective welfare that threatens the
        viability of these systems. This paper surveys recent research at the
        intersection of economics and computer science that targets the design of
        distributed systems consisting of rational participants with diverse and selfish
        interests. In particular, we discuss major findings and open questions related to
        free-riding in P2P systems: factors affecting the degree of free-riding,
        incentive mechanisms to encourage user cooperation, and challenges in the design
        of incentive mechanisms for P2P systems}, 
  www_section = {algorithms, cooperation, design, economics, game-theory, hidden-action,
        hidden-information, incentives, peer-to-peer networking, performance,
        reliability}, 
  issn = {1551-9031}, 
  doi = {10.1145/1120717.1120723}, 
  url = {http://doi.acm.org/10.1145/1120717.1120723}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGecom\%20Exch.\%20\%285\%29\%20-\%20Overcoming\%20free-riding\%20behavior.pdf},
}
FessiIPTComm2010
% Removed the stray ", Munich" venue suffix from booktitle: the venue is already
% recorded in the address field, and the proceedings title proper is just
% "Principles, Systems and Applications of IP Telecommunications (IPTComm)".
@conference{FessiIPTComm2010,
  title = {Pr2-P2PSIP: Privacy Preserving P2P Signaling for VoIP and IM}, 
  author = {Fessi, Ali and Nathan S Evans and Heiko Niedermayer and Ralph Holz}, 
  booktitle = {Principles, Systems and Applications of IP Telecommunications (IPTComm)}, 
  year = {2010}, 
  month = {August}, 
  address = {Munich, Germany}, 
  pages = {141--152}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fessi_iptcomm_2010.pdf},
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
Fiat02censorshipresistant
% Censorship-resistant content-addressable-network paper, kept as @booklet (only
% author/title/year recorded). The generator already flags the missing www_section
% below -- topic tags still need to be assigned.
@booklet{Fiat02censorshipresistant,
  title = {Censorship Resistant Peer-to-Peer Content Addressable Networks}, 
  author = {Amos Fiat and Jared Saia}, 
  year = {2002}, 
  abstract = {We present a censorship resistant peer-to-peer network for accessing n data
        items in a network of n nodes. Each search for a data item in the network takes
        O(log n) time and requires at most O(log2n) messages. Our network is censorship
        resistant in the sense that even after adversarial removal of an arbitrarily
        large constant fraction of the nodes in the network, all but an arbitrarily small
        fraction of the remaining nodes can obtain all but an arbitrarily small fraction
        of the original data items. The network can be created in a fully distributed
        fashion. It requires only O(log n) memory in each node. We also give a variant of
        our scheme that has the property that it is highly spam resistant: an adversary
        can take over complete control of a constant fraction of the nodes in the network
        and yet will still be unable to generate spam}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.16.4761\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.4761.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Fiat05makingchord
% Dropped the redundant "In " prefix from booktitle (citation styles supply it).
% NOTE(review): doi 10.1007/11561071 is the DOI of the whole ESA 2005 LNCS volume;
% the paper-level DOI should carry a chapter suffix -- verify before changing.
% NOTE(review): www_pdf_url filename "swarm.pdf" looks unrelated -- verify mirror.
@conference{Fiat05makingchord,
  title = {Making chord robust to byzantine attacks}, 
  author = {Amos Fiat and Jared Saia and Maxwell Young}, 
  booktitle = {Proc. of the European Symposium on Algorithms (ESA)}, 
  organization = {Springer}, 
  year = {2005}, 
  pages = {803--814}, 
  publisher = {Springer}, 
  abstract = {Chord is a distributed hash table (DHT) that requires only O(log n) links per
        node and performs searches with latency and message cost O(log n), where n is the
        number of peers in the network. Chord assumes all nodes behave according to
        protocol. We give a variant of Chord which is robust with high probability for
        any time period during which: 1) there are always at least z total peers in the
        network for some integer z; 2) there are never more than (1/4--{\epsilon})z
        Byzantine peers in the network for a fixed {\epsilon} > 0; and 3) the number of
        peer insertion and deletion events is no more than zk for some tunable parameter
        k. We assume there is an adversary controlling the Byzantine peers and that the
        IP-addresses of all the Byzantine peers and the locations where they join the
        network are carefully selected by this adversary. Our notion of robustness is
        rather strong in that we not only guarantee that searches can be performed but
        also that we can enforce any set of {\textquotedblleft}proper
        behavior{\textquotedblright} such as contributing new material, etc. In
        comparison to Chord, the resources required by this new variant are only a
        polylogarithmic factor greater in communication, messaging, and linking costs}, 
  www_section = {Chord, distributed hash table, robustness}, 
  doi = {10.1007/11561071}, 
  url = {http://www.springerlink.com/content/422llxn7khwej72n/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/swarm.pdf}, 
}
Fitzi:2006:OEM:1146381.1146407
% PODC'06 paper on multi-valued Byzantine agreement; record is complete (bare doi,
% isbn, pdf mirror) except that no abstract was captured.
@conference{Fitzi:2006:OEM:1146381.1146407,
  title = {Optimally efficient multi-valued byzantine agreement}, 
  author = {Fitzi, Matthias and Hirt, Martin}, 
  booktitle = {Proceedings of the twenty-fifth annual ACM symposium on Principles of
        distributed computing}, 
  organization = {ACM}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {163--168}, 
  publisher = {ACM}, 
  series = {PODC '06}, 
  www_section = {byzantine agreement, communication complexity, cryptographic security,
        information-theoretic security}, 
  isbn = {1-59593-384-0}, 
  doi = {10.1145/1146381.1146407}, 
  url = {http://doi.acm.org/10.1145/1146381.1146407}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FitHir06.pdf}, 
}
Fragouli:2006:NCI:1111322.1111337
% Fixed title casing: "An" capitalized after the colon (the database stores titles
% in Title Case, cf. sibling entries). Also normalized doi to the bare identifier;
% the resolver link is kept verbatim in url.
@article{Fragouli:2006:NCI:1111322.1111337,
  title = {Network Coding: An Instant Primer}, 
  author = {Fragouli, Christina and Jean-Yves Le Boudec and J{\"o}rg Widmer}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {36}, 
  year = {2006}, 
  month = {January}, 
  address = {New York, NY, USA}, 
  pages = {63--68}, 
  publisher = {ACM}, 
  abstract = {Network coding is a new research area that may have interesting applications
        in practical networking systems. With network coding, intermediate nodes may send
        out packets that are linear combinations of previously received information.
        There are two main benefits of this approach: potential throughput improvements
        and a high degree of robustness. Robustness translates into loss resilience and
        facilitates the design of simple distributed algorithms that perform well, even
        if decisions are based only on partial information. This paper is an instant
        primer on network coding: we explain what network coding does and how it does it.
        We also discuss the implications of theoretical results on network coding for
        realistic settings and show how network coding can be used in practice}, 
  www_section = {network coding}, 
  issn = {0146-4833}, 
  doi = {10.1145/1111322.1111337}, 
  url = {http://doi.acm.org/10.1145/1111322.1111337}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev\%20-\%20Network\%20Coding\%3A\%20an\%20Instant\%20Primer.pdf},
}
Freedman02introducingtarzan
% Tarzan anonymizing network layer (IPTPS'02 revised papers).
% NOTE(review): journal/volume are not standard @booklet fields and are ignored by
% standard styles; consider @incollection if the LNCS volume should render.
% The generator already flags the missing www_section below.
@booklet{Freedman02introducingtarzan,
  title = {Introducing Tarzan, a Peer-to-Peer Anonymizing Network Layer}, 
  author = {Michael J. Freedman and Emil Sit and Josh Cates and Robert Morris}, 
  journal = {Revised Papers from the First International Workshop on Peer-to-Peer Systems}, 
  volume = {Vol. 2429}, 
  year = {2002}, 
  pages = {121--129}, 
  abstract = {We introduce Tarzan, a peer-to-peer anonymous network layer that provides
        generic IP forwarding. Unlike prior anonymizing layers, Tarzan is flexible,
        transparent, decentralized, and highly scalable. Tarzan achieves these properties
        by building anonymous IP tunnels between an open-ended set of peers. Tarzan can
        provide anonymity to existing applications, such as web browsing and file
        sharing, without change to those applications. Performance tests show that Tarzan
        imposes minimal overhead over a corresponding non-anonymous overlay route}, 
  isbn = {3-540-44179-4}, 
  url = {http://www.cs.rice.edu/Conferences/IPTPS02/182.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tarzan.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Freedman03sloppyhashing
% Fixed: the url lacked a scheme ("www.coralcdn.org/...") and would not resolve as
% a hyperlink; added "http://". Missing www_section is already flagged below.
@booklet{Freedman03sloppyhashing,
  title = {Sloppy Hashing and Self-Organizing Clusters}, 
  author = {Michael J. Freedman and David Mazi{\`e}res}, 
  journal = {In IPTPS}, 
  volume = {Volume 2735/2003}, 
  year = {2003}, 
  pages = {45--55}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {We are building Coral, a peer-to-peer content distribution system. Coral
        creates self-organizing clusters of nodes that fetch information from each other
        to avoid communicating with more distant or heavily-loaded servers. Coral indexes
        data, but does not store it. The actual content resides where it is used, such as
        in nodes' local web caches. Thus, replication happens exactly in proportion to
        demand}, 
  isbn = {978-3-540-40724-9}, 
  url = {http://www.coralcdn.org/docs/coral-iptps03.ps}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/coral-iptps03.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Fu:2002:FSD:505452.505453
@conference{Fu:2002:FSD:505452.505453,
  title = {Fast and secure distributed read-only file system}, 
  author = {Kevin Fu and Frans M. Kaashoek and David Mazi{\`e}res}, 
  booktitle = {OSDI 2000--Proceedings of the 4th USENIX Symposium on Operating Systems
        Design and Implementation}, 
  organization = {ACM}, 
  volume = {20}, 
  year = {2002}, 
  month = {October}, 
  address = {San Diego, CA, USA}, 
  pages = {1--24}, 
  publisher = {ACM}, 
  abstract = {Internet users increasingly rely on publicly available data for everything
        from software installation to investment decisions. Unfortunately, the vast
        majority of public content on the Internet comes with no integrity or
        authenticity guarantees. This paper presents the self-certifying read-only file
        system, a content distribution system providing secure, scalable access to
        public, read-only data. The read-only file system makes the security of published
        content independent from that of the distribution infrastructure. In a secure
        area (perhaps off-line), a publisher creates a digitally-signed database out of a
        file system's contents. The publisher then replicates the database on untrusted
        content-distribution servers, allowing for high availability. The read-only file
        system protocol furthermore pushes the cryptographic cost of content verification
        entirely onto clients, allowing servers to scale to a large number of clients.
        Measurements of an implementation show that an individual server running on a 550
        Mhz Pentium III with FreeBSD can support 1,012 connections per second and 300
        concurrent clients compiling a large software package}, 
  www_section = {file systems, read-only, security}, 
  issn = {0734-2071}, 
  doi = {10.1145/505452.505453}, 
  url = {http://doi.acm.org/10.1145/505452.505453}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Fast\%20and\%20Secure\%20Distributed\%20Read-Only\%20File\%20System.pdf},
}
Fu::FlowMarking::2005
@conference{Fu::FlowMarking::2005,
  title = {On Flow Marking Attacks in Wireless Anonymous Communication Networks}, 
  author = {Xinwen Fu and Ye Zhu and Bryan Graham and Riccardo Bettati and Wei Zhao}, 
  booktitle = {Proceedings of the IEEE International Conference on Distributed Computing
        Systems (ICDCS)}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2005}, 
  month = {April}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {This paper studies the degradation of anonymity in a flow-based wireless mix
        network under flow marking attacks, in which an adversary embeds a recognizable
        pattern of marks into wireless traffic flows by electromagnetic interference. We
        find that traditional mix technologies are not effective in defeating flow
        marking attacks, and it may take an adversary only a few seconds to recognize the
        communication relationship between hosts by tracking suchartificial marks. Flow
        marking attacks utilize frequency domain analytical techniques and convert time
        domain marks into invariant feature frequencies. To counter flow marking attacks,
        we propose a new countermeasure based on digital filtering technology, and show
        that this filter-based counter-measure can effectively defend a wireless mix
        network from flow marking attacks}, 
  www_section = {802.11, anonymity, Bluetooth, flow marking attack}, 
  isbn = {0-7695-2331-5}, 
  url = {http://portal.acm.org/citation.cfm?id=1069397}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Fu--FlowMarking--2005.pdf},
}
Fuhrmann03resultson
@conference{Fuhrmann03resultson,
  title = {Results on the practical feasibility of programmable network services}, 
  author = {Thomas Fuhrmann and Till Harbaum and Panos Kassianidis and Marcus Schoeller and
        Martina Zitterbart}, 
  booktitle = {In 2nd International Workshop on Active Network Technologies and
        Applications (ANTA)}, 
  year = {2003}, 
  abstract = {Active and programmable networks have been subject to intensive and
        successful research activities during the last couple of years. Many ideas and
        concepts have been pursued. However, only a few prototype implementations that
        have been developed so far, can deal with different applications in a larger
        scale setting. Moreover, detailed performance analyses of such prototypes are
        greatly missing today. Therefore, this paper does not present yet another
        architecture for active and programmable networks. In contrast, it rather focuses
        on the performance evaluation of the so-called AMnet approach that has already
        been presented previously [1]. As such, the paper demonstrates that an
        operational high-performance programmable network system with AAA
        (authentication, authorization, and accounting) security functionality will in
        fact be feasible in the near future}, 
  www_section = {programmable networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.3074}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03performance.pdf},
}
Fuhrmann05scalablerouting
@conference{Fuhrmann05scalablerouting,
  title = {Scalable routing for networked sensors and actuators}, 
  author = {Thomas Fuhrmann}, 
  booktitle = {In Proceedings of the Second Annual IEEE Communications Society Conference
        on Sensor and Ad Hoc Communications and Networks}, 
  year = {2005}, 
  abstract = {The design of efficient routing protocols for ad hoc and sensor networks is
        challenging for several reasons: Physical network topology is random. Nodes have
        limited computation and memory capabilities. Energy and bisection bandwidth are
        scarce. Furthermore, in most settings, the lack of centralized components leaves
        all network control tasks to the nodes acting as decentralized peers. In this
        paper, we present a novel routing algorithm, scalable source routing (SSR), which
        is capable of memory and message efficient routing in large random networks. A
        guiding example is a community of 'digital homes ' where smart sensors and
        actuators are installed by laypersons. Such networks combine wireless ad-hoc and
        infrastructure networks, and lack a well-crafted network topology. Typically, the
        nodes do not have sufficient processing and memory resources to perform
        sophisticated routing algorithms. Flooding on the other hand is too
        bandwidthconsuming in the envisaged large-scale networks. SSR is a fully
        self-organizing routing protocol for such scenarios. It creates a virtual ring
        that links all nodes via predecessor/successor source routes. Additionally, each
        node possesses O(log N) short-cut source routes to nodes in exponentially
        increasing virtual ring distance. Like with the Chord overlay network, this
        ensures full connectivity within the network. Moreover, it provides a routing
        semantic which can efficiently support indirection schemes like i3. Memory and
        message efficiency are achieved by the introduction of a route cache together
        with a set of path manipulation rules that allow to produce near-to-optimal
        paths}, 
  www_section = {scalable source routing, sensor networks, wireless sensor network}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.6509}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.67.6509.pdf}, 
}
Fuhrmann_anode
@booklet{Fuhrmann_anode,
  title = {A Node Evaluation Mechanism for Service Setup in {AMnet}}, 
  author = {Thomas Fuhrmann and Marcus Schoeller and Christina Schmidt and Martina
        Zitterbart}, 
  year = {2003}, 
  abstract = {AMnet is a programmable network that aims at the flexible and rapid creation
        of services within an IP network. Examples for typical services include network
        layer enhancements e.g. for multicast and mobility, transport layer enhancements
        e.g. to integrate wireless LANs, and various application layer services e.g. for
        media transcoding and content distribution. AMnet is based on regular Linux boxes
        that run an execution environment (EE), a resource monitor, and a basic
        signaling-engine. These so-called active nodes run the services and provide
        support for resource-management and module-relocation. Services are created by
        service modules, small pieces of code, that are executed within the EE. Based on
        the standard netfilter mechanism of Linux, service modules have full access to
        the network traffic passing through the active node. This paper describes the
        evaluation mechanism for service setup in AMnet. In order to determine where a
        service module can be started, service modules are accompanied by evaluation
        modules. This allows service module authors to implement various customized
        strategies for node-selection and service setup. Examples that are supported by
        the AMnet evaluation mechanism are a) service setup at a fixed position, e.g. as
        gateway, b) along a fixed path (with variable position along that path), c) at
        variable positions inside the network with preferences for certain
        constellations, or d) at an unspecified position, e.g. for modification of
        multicasted traffic. The required path information is gathered by the AMnodes
        present in the network. By interaction with the resource monitors of the AMnodes
        and the service module repository of the respective administrative domain, the
        AMnet evaluation also ensures overall system security and stability}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.8749}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03evaluation.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Fuhrmann_aplatform
@booklet{Fuhrmann_aplatform,
  title = {A platform for lab exercises in sensor networks}, 
  author = {Thomas Fuhrmann and Till Harbaum}, 
  year = {2005}, 
  abstract = {Programming of and experiences with sensor network nodes are about to enter
        the curricula of technical universities. Often however, practical obstacles
        complicate the implementation of a didactic concept. In this paper we present our
        approach that uses a Java virtual machine to decouple experiments with algorithm
        and protocol concepts from the odds of embedded system programming. This concept
        enables students to load Java classes via an SD-card into a sensor node. An LC
        display provides detailed information if the program aborts due to bugs}, 
  www_section = {sensor networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.72.8036}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.72.8036.pdf}, 
}
Fuhrmann_networkservices
@booklet{Fuhrmann_networkservices,
  title = {Network Services for the Support of Very-Low-Resource Devices}, 
  author = {Thomas Fuhrmann and Till Harbaum and Martina Zitterbart}, 
  year = {2003}, 
  abstract = {Visions of future computing scenarios envisage a multitude of
        very-low-resource devices linked by power-efficient wireless communication means.
        This paper presents our vision of such a scenario. From this vision requirements
        are derived for an infrastructure that is able to satisfy the largely differing
        needs of these devices. The paper also shows how innovative, collaborating
        applications between distributed sensors and actuators can arise from such an
        infrastructure. The realization of such innovative applications is illustrated
        with two examples of straightforward services that have been implemented with the
        AMnet infrastructure that is currently being developed in the FlexiNet project.
        Additionally, first performance measurements for one of these services are given.
        Index terms {\textemdash} Bluetooth, Programmable networks, Sensoractuator
        networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.69.186}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ASWN2003.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Fuhrmann_usingbluetooth
@booklet{Fuhrmann_usingbluetooth,
  title = {Using Bluetooth for Informationally Enhanced Environments Abstract}, 
  author = {Thomas Fuhrmann and Till Harbaum}, 
  year = {2003}, 
  abstract = {The continued miniaturization in computing and wireless communication is
        about to make informationally enhanced environments become a reality. Already
        today, devices like a notebook computer or a personal digital assistent (PDA) can
        easily connect to the Internet via IEEE 802.11 networks (WaveLAN) or similar
        technologies provided at so-called hot-spots. In the near future, even smaller
        devices can join a wireless network to exchange status information or send and
        receive commands. In this paper, we present sample uses of a generic Bluetooth
        component that we have developed and that has been successfully integrated into
        various mininature devices to transmit sensor data or exchange control commands.
        The use of standard protocols like TCP/IP, Obex, and HTTP simplifies the use of
        those devices with conventional devices (notebook, PDA, cell-phone) without even
        requiring special drivers or applications for these devices. While such scenarios
        have already often been dreamt of, we are able to present a working solution
        based on small and cost-effective standard elements. We describe two applications
        that illustrate the power this approach in the broad area of e-commerce,
        e-learning, and e-government: the BlueWand, a small, pen-like device that can
        control Bluetooth devices in its vincinity by simple gestures, and a door plate
        that can display messages that are posted to it e.g. by a Bluetooth PDA.
        Keywords: Human-Computer Interaction, Ubiquitous Computing, Wireless
        Communications (Bluetooth)}, 
  www_section = {Bluetooth, ubiquitous computing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.73.2131}, 
}
GHPvR05
@conference{GHPvR05,
  title = {Provable Anonymity}, 
  author = {Flavio D. Garcia and Ichiro Hasuo and Wolter Pieters and Peter van Rossum}, 
  booktitle = {Proceedings of the 3rd ACM Workshop on Formal Methods in Security
        Engineering (FMSE05)}, 
  year = {2005}, 
  month = {November}, 
  address = {Alexandria, VA, USA}, 
  abstract = {This paper provides a formal framework for the analysis of information hiding
        properties of anonymous communication protocols in terms of epistemic logic.The
        key ingredient is our notion of observational equivalence, which is based on the
        cryptographic structure of messages and relations between otherwise random
        looking messages. Two runs are considered observationally equivalent if a spy
        cannot discover any meaningful distinction between them.We illustrate our
        approach by proving sender anonymity and unlinkability for two anonymizing
        protocols, Onion Routing and Crowds. Moreover, we consider a version of Onion
        Routing in which we inject a subtle error and show how our framework is capable
        of capturing this flaw}, 
  www_section = {cryptography, onion routing}, 
  isbn = {1-59593-231-3}, 
  doi = {10.1145/1103576.1103585}, 
  url = {http://portal.acm.org/citation.cfm?id=1103576.1103585}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GHPvR05.pdf}, 
}
GKK03
@conference{GKK03,
  title = {Rapid Mixing and Security of Chaum's Visual Electronic Voting}, 
  author = {Marcin Gomulkiewicz and Marek Klonowski and Miroslaw Kutylowski}, 
  booktitle = {Proceedings of ESORICS 2003}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2003}, 
  month = {October}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Recently, David Chaum proposed an electronic voting scheme that combines
        visual cryptography and digital processing. It was designed to meet not only
        mathematical security standards, but also to be accepted by voters that do not
        trust electronic devices. In this scheme mix-servers are used to guarantee
        anonymity of the votes in the counting process. The mix-servers are operated by
        different parties, so an evidence of their correct operation is necessary. For
        this purpose the protocol uses randomized partial checking of Jakobsson et al.,
        where some randomly selected connections between the (encoded) inputs and outputs
        of a mix-server are revealed. This leaks some information about the ballots, even
        if intuitively this information cannot be used for any efficient attack. We
        provide a rigorous stochastic analysis of how much information is revealed by
        randomized partial checking in the Chaums protocol. We estimate how many
        mix-servers are necessary for a fair security level. Namely, we consider
        probability distribution of the permutations linking the encoded votes with the
        decoded votes given the information revealed by randomized partial checking. We
        show that the variation distance between this distribution and the uniform
        distribution is already for a constant number of mix-servers (n is the number of
        voters). This means that a constant number of trustees in the Chaums protocol is
        enough to obtain provable security. The analysis also shows that certain details
        of the Chaums protocol can be simplified without lowering security level}, 
  www_section = {electronic voting, Markov chain, path coupling, randomized partial
        checking, rapid mixing}, 
  isbn = {978-3-540-20300-1}, 
  doi = {10.1007/b13237}, 
  url = {http://www.springerlink.com/content/5gmj68nn4x1xc4j1/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GKK03.pdf}, 
}
Gairing:2005:SRI:1073970.1074000
@conference{Gairing:2005:SRI:1073970.1074000,
  title = {Selfish Routing with Incomplete Information}, 
  author = {Gairing, Martin and Monien, Burkhard and Tiemann, Karsten}, 
  booktitle = {SPAA'05. Proceedings of the 17th Annual ACM Symposium on Parallelism in
        Algorithms and Architectures}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {July}, 
  address = {Las Vegas, Nevada}, 
  pages = {203--212}, 
  publisher = {ACM}, 
  series = {SPAA '05}, 
  abstract = {In his seminal work Harsanyi introduced an elegant approach to study
        non-cooperative games with incomplete information where the players are uncertain
        about some parameters. To model such games he introduced the Harsanyi
        transformation, which converts a game with incomplete information to a strategic
        game where players may have different types. In the resulting Bayesian game
        players' uncertainty about each others types is described by a probability
        distribution over all possible type profiles.In this work, we introduce a
        particular selfish routing game with incomplete information that we call Bayesian
        routing game. Here, n selfish users wish to assign their traffic to one of m
        links. Users do not know each others traffic. Following Harsanyi's approach, we
        introduce for each user a set of possible types.This paper presents a
        comprehensive collection of results for the Bayesian routing game.We prove, with
        help of a potential function, that every Bayesian routing game possesses a pure
        Bayesian Nash equilibrium. For the model of identical links and independent type
        distribution we give a polynomial time algorithm to compute a pure Bayesian Nash
        equilibrium.We study structural properties of fully mixed Bayesian Nash
        equilibria for the model of identical links and show that they maximize
        individual cost. In general there exists more than one fully mixed Bayesian Nash
        equilibrium. We characterize the class of fully mixed Bayesian Nash equilibria in
        the case of independent type distribution.We conclude with results on
        coordination ratio for the model of identical links for three social cost
        measures, that is, social cost as expected maximum congestion, sum of individual
        costs and maximum individual cost. For the latter two we are able to give
        (asymptotic) tight bounds using our results on fully mixed Bayesian Nash
        equilibria.To the best of our knowledge this is the first time that mixed
        Bayesian Nash equilibria have been studied in conjunction with social cost}, 
  www_section = {bayesian game, coordination ratio, incomplete information, nash
        equilibria, selfish routing}, 
  isbn = {1-58113-986-1}, 
  doi = {10.1145/1073970.1074000}, 
  url = {http://doi.acm.org/10.1145/1073970.1074000}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SPAA\%2705\%20-\%20Selfish\%20routing\%20with\%20incomplete\%20information.pdf},
}
Garbacki:2007:ATP:1270401.1271766
@conference{Garbacki:2007:ATP:1270401.1271766,
  title = {An Amortized Tit-For-Tat Protocol for Exchanging Bandwidth instead of Content in
        P2P Networks}, 
  author = {Garbacki, Pawel and Epema, Dick H. J. and van Steen, Maarten}, 
  booktitle = {SASO 2007. Proceedings of the First International Conference on
        Self-Adaptive and Self-Organizing Systems}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {July}, 
  address = {Boston, Massachusetts}, 
  pages = {119--128}, 
  publisher = {IEEE Computer Society}, 
  series = {SASO '07}, 
  abstract = {Incentives for resource sharing are crucial for the proper operation of P2P
        networks. The principle of the incentive mechanisms in current content sharing
        P2P networks such as BitTorrent is to have peers exchange content of mutual
        interest. As a consequence, a peer can actively participate in the system only if
        it shares content that is of immediate interest to other peers. In this paper we
        propose to lift this restriction by using bandwidth rather than content as the
        resource upon which incentives are based. Bandwidth, in contrast to content, is
        independent of peer interests and so can be exchanged between any two peers. We
        present the design of a protocol called amortized tit-for-tat (ATFT) based on the
        bandwidth-exchange concept. This protocol defines mechanisms for bandwidth
        exchange corresponding to those in BitTorrent for content exchange, in particular
        for finding bandwidth borrowers that amortize the bandwidth borrowed in the past
        with their currently idle bandwidth. In addition to the formally proven
        incentives for bandwidth contributions, ATFT provides natural solutions to the
        problems of peer bootstrapping, seeding incentive, peer link asymmetry, and
        anonymity, which have previously been addressed with much more complex designs.
        Experiments with a realworld dataset confirm that ATFT is efficient in enforcing
        bandwidth contributions and results in download performance better than provided
        by incentive mechanisms based on content exchange}, 
  isbn = {0-7695-2906-2}, 
  doi = {10.1109/SASO.2007.9}, 
  url = {http://dx.doi.org/10.1109/SASO.2007.9}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SASO\%2707\%20-\%20Garbacki\%2C\%20Epema\%20\%26\%20van\%20Steen.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Garbacki:2007:ATP:1270401.1271766_0
@conference{Garbacki:2007:ATP:1270401.1271766_0,
  title = {An Amortized Tit-For-Tat Protocol for Exchanging Bandwidth instead of Content in
        P2P Networks}, 
  author = {Garbacki, Pawel and Epema, Dick H. J. and van Steen, Maarten}, 
  booktitle = {SASO 2007. Proceedings of the First International Conference on
        Self-Adaptive and Self-Organizing Systems}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {July}, 
  address = {Boston, Massachusetts}, 
  pages = {119--128}, 
  publisher = {IEEE Computer Society}, 
  series = {SASO '07}, 
  abstract = {Incentives for resource sharing are crucial for the proper operation of P2P
        networks. The principle of the incentive mechanisms in current content sharing
        P2P networks such as BitTorrent is to have peers exchange content of mutual
        interest. As a consequence, a peer can actively participate in the system only if
        it shares content that is of immediate interest to other peers. In this paper we
        propose to lift this restriction by using bandwidth rather than content as the
        resource upon which incentives are based. Bandwidth, in contrast to content, is
        independent of peer interests and so can be exchanged between any two peers. We
        present the design of a protocol called amortized tit-for-tat (ATFT) based on the
        bandwidth-exchange concept. This protocol defines mechanisms for bandwidth
        exchange corresponding to those in BitTorrent for content exchange, in particular
        for finding bandwidth borrowers that amortize the bandwidth borrowed in the past
        with their currently idle bandwidth. In addition to the formally proven
        incentives for bandwidth contributions, ATFT provides natural solutions to the
        problems of peer bootstrapping, seeding incentive, peer link asymmetry, and
        anonymity, which have previously been addressed with much more complex designs.
        Experiments with a realworld dataset confirm that ATFT is efficient in enforcing
        bandwidth contributions and results in download performance better than provided
        by incentive mechanisms based on content exchange}, 
  www_section = {bandwidth exchange, p2p network, resource sharing, tit-for-tat}, 
  isbn = {0-7695-2906-2}, 
  doi = {10.1109/SASO.2007.9}, 
  url = {http://dx.doi.org/10.1109/SASO.2007.9}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SASO\%2707\%20-\%20Garbacki\%2C\%20Epema\%20\%26\%20van\%20Steen.pdf},
}
Garces-Erice2004DataIndexing
@conference{Garces-Erice2004DataIndexing,
  title = {Data Indexing in Peer-to-Peer DHT Networks}, 
  author = {L Garc{\'e}s-Erice and Felber, P. A. and E W Biersack and Urvoy-Keller, G. and
        Ross, K. W.}, 
  booktitle = {Proceedings of the 24th International Conference on Distributed Computing
        Systems (ICDCS'04)}, 
  organization = {IEEE Computer Society}, 
  year = {2004}, 
  address = {Washington, DC, USA}, 
  pages = {200--208}, 
  publisher = {IEEE Computer Society}, 
  series = {ICDCS '04}, 
  isbn = {0-7695-2086-3}, 
  url = {http://dl.acm.org/citation.cfm?id=977400.977979}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Garcia05off-linekarma:
@conference{Garcia05off-linekarma:,
  title = {Off-line Karma: A Decentralized Currency for Peer-to-peer and Grid
        Applications}, 
  author = {Flavio D. Garcia and Jaap-Henk Hoepman}, 
  booktitle = {ACNS'05. 3rd Applied Cryptography and Network Security Conference}, 
  organization = {Springer}, 
  volume = {3531}, 
  year = {2005}, 
  month = {June}, 
  address = {New York, NY, USA}, 
  pages = {364--377}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Peer-to-peer (P2P) and grid systems allow their users to exchange information
        and share resources, with little centralised or hierarchical control, instead
        relying on the fairness of the users to make roughly as much resources available
        as they use. To enforce this balance, some kind of currency or barter (called
        karma) is needed that must be exchanged for resources thus limiting abuse. We
        present a completely decentralised, off-line karma implementation for P2P and
        grid systems, that detects double-spending and other types of fraud under varying
        adversarial scenarios. The system is based on tracing the spending pattern of
        coins, and distributing the normally central role of a bank over a predetermined,
        but random, selection of nodes. The system is designed to allow nodes to join and
        leave the system at arbitrary times}, 
  www_section = {decentralized, free-riding, GRID, micropayments, peer-to-peer networking,
        security}, 
  doi = {10.1007/11496137_25}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACNS\%2705\%20-\%20Garcia\%20\%26\%20Hoepman\%20-\%20Off-line\%20Karma.pdf},
}
Gay03thenesc
@conference{Gay03thenesc,
  title = {The nesC language: A holistic approach to networked embedded systems}, 
  author = {David Gay and Matt Welsh and Philip Levis and Eric Brewer and Robert Von Behren
        and Culler, David}, 
  booktitle = {In Proceedings of Programming Language Design and Implementation (PLDI)}, 
  year = {2003}, 
  pages = {1--11}, 
  abstract = {We present nesC, a programming language for networked embedded systems that
        represent a new design space for application developers. An example of a
        networked embedded system is a sensor network, which consists of (potentially)
        thousands of tiny, low-power "motes," each of which execute concurrent, reactive
        programs that must operate with severe memory and power constraints.nesC's
        contribution is to support the special needs of this domain by exposing a
        programming model that incorporates event-driven execution, a flexible
        concurrency model, and component-oriented application design. Restrictions on the
        programming model allow the nesC compiler to perform whole-program analyses,
        including data-race detection (which improves reliability) and aggressive
        function inlining (which reduces resource consumption).nesC has been used to
        implement TinyOS, a small operating system for sensor networks, as well as
        several significant sensor applications. nesC and TinyOS have been adopted by a
        large number of sensor network research groups, and our experience and evaluation
        of the language shows that it is effective at supporting the complex, concurrent
        programming style demanded by this new class of deeply networked systems}, 
  www_section = {data races, nesC, TinyOS}, 
  doi = {10.1145/781131.781133}, 
  url = {http://portal.acm.org/citation.cfm?id=781133}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.127.9488.pdf}, 
}
Godfrey05heterogeneityand
@conference{Godfrey05heterogeneityand,
  title = {Heterogeneity and Load Balance in Distributed Hash Tables}, 
  author = {Godfrey, Brighten and Ion Stoica}, 
  booktitle = {IN PROC. OF IEEE INFOCOM}, 
  year = {2005}, 
  abstract = {Existing solutions to balance load in DHTs incur a high overhead either in
        terms of routing state or in terms of load movement generated by nodes arriving
        or departing the system. In this paper, we propose a set of general techniques
        and use them to develop a protocol based on Chord, called Y0 , that achieves load
        balancing with minimal overhead under the typical assumption that the load is
        uniformly distributed in the identifier space. In particular, we prove that Y0
        can achieve near-optimal load balancing, while moving little load to maintain the
        balance and increasing the size of the routing tables by at most a constant
        factor}, 
  www_section = {Chord, distributed hash table, load balancing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.6740}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper.pdf}, 
}
Godfrey:2006:MCD:1151659.1159931
@article{Godfrey:2006:MCD:1151659.1159931,
  title = {Minimizing churn in distributed systems}, 
  author = {Godfrey, Brighten and S Shenker and Ion Stoica}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {36}, 
  year = {2006}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {147--158}, 
  publisher = {ACM}, 
  abstract = {A pervasive requirement of distributed systems is to deal with
        churn{\textemdash}change in the set of participating nodes due to joins, graceful
        leaves, and failures. A high churn rate can increase costs or decrease service
        quality. This paper studies how to reduce churn by selecting which subset of a
        set of available nodes to use. First, we provide a comparison of the performance
        of a range of different node selection strategies in five real-world traces.
        Among our findings is that the simple strategy of picking a uniform-random
        replacement whenever a node fails performs surprisingly well. We explain its
        performance through analysis in a stochastic model. Second, we show that a class
        of strategies, which we call "Preference List" strategies, arise commonly as a
        result of optimizing for a metric other than churn, and produce high churn
        relative to more randomized strategies under realistic node failure patterns.
        Using this insight, we demonstrate and explain differences in performance for
        designs that incorporate varying degrees of randomization. We give examples from
        a variety of protocols, including anycast, overlay multicast, and distributed
        hash tables. In many cases, simply adding some randomization can go a long way
        towards reducing churn}, 
  www_section = {churn, distributed hash table, multicast, node selection}, 
  issn = {0146-4833}, 
  doi = {10.1145/1151659.1159931}, 
  url = {http://doi.acm.org/10.1145/1151659.1159931}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comp.\%20Comm.\%20Rev.\%20-\%20Minimizing\%20churn\%20in\%20distributed\%20systems.pdf},
}
Goh04secureindexes
@conference{Goh04secureindexes,
  author      = {Eu-jin Goh},
  title       = {Secure Indexes},
  booktitle   = {In submission},
  year        = {2004},
  url         = {http://gnunet.org/papers/secureindex.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/secureindex.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Goldberg:2008:RTA:1402958.1402989
@conference{Goldberg:2008:RTA:1402958.1402989,
  title = {Rationality and Traffic Attraction: Incentives for Honest Path Announcements in
        BGP}, 
  author = {Goldberg, Sharon and Halevi, Shai and Jaggard, Aaron D. and Ramachandran, Vijay
        and Wright, Rebecca N.}, 
  booktitle = {SIGCOMM'08. Proceedings of the ACM SIGCOMM 2008 Conference on Data
        Communication}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {October}, 
  address = {Seattle, WA}, 
  pages = {267--278}, 
  publisher = {ACM}, 
  series = {SIGCOMM Computer Communication Review}, 
  abstract = {We study situations in which autonomous systems (ASes) may have incentives to
        send BGP announcements differing from the AS-level paths that packets traverse in
        the data plane. Prior work on this issue assumed that ASes seek only to obtain
        the best possible outgoing path for their traffic. In reality, other factors can
        influence a rational AS's behavior. Here we consider a more natural model, in
        which an AS is also interested in attracting incoming traffic (e.g., because
        other ASes pay it to carry their traffic). We ask what combinations of BGP
        enhancements and restrictions on routing policies can ensure that ASes have no
        incentive to lie about their data-plane paths. We find that protocols like S-BGP
        alone are insufficient, but that S-BGP does suffice if coupled with additional
        (quite unrealistic) restrictions on routing policies. Our game-theoretic analysis
        illustrates the high cost of ensuring that the ASes honestly announce data-plane
        paths in their BGP path announcements}, 
  www_section = {as, autonomous system, bgp, incentives}, 
  isbn = {978-1-60558-175-0}, 
  doi = {10.1145/1402958.1402989}, 
  url = {http://doi.acm.org/10.1145/1402958.1402989}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2708\%20-\%20Rationality\%20and\%20traffic\%20attraction.pdf},
}
Goldreich98securemulti-party
@book{Goldreich98securemulti-party,
  author       = {Oded Goldreich},
  title        = {Secure Multi-Party Computation},
  booktitle    = {The Foundations of Cryptography},
  organization = {Cambridge University Press},
  volume       = {2},
  year         = {1998},
  publisher    = {Cambridge University Press},
  url          = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.11.2201\&rep=rep1\&type=pdf},
  www_section  = {Unsorted},
}
Goldschlag99onionrouting
@article{Goldschlag99onionrouting,
  title = {Onion Routing for Anonymous and Private Internet Connections}, 
  author = {David Goldschlag and Michael Reed and Paul Syverson}, 
  journal = {Communications of the ACM}, 
  volume = {42}, 
  year = {1999}, 
  pages = {39--41}, 
  abstract = {As of this article's publication, the prototype network is processing more
        than 1 million Web connections per month from more than six thousand IP addresses
        in twenty countries and in all six main top level domains. [7] Onion Routing
        operates by dynamically building anonymous connections within a network of
        real-time Chaum Mixes [3]. A Mix is a store and forward device that accepts a
        number of fixed-length messages from numerous sources, performs cryptographic
        transformations on the messages, and then forwards the messages to the next
        destination in a random order. A single Mix makes tracking of a particular
        message either by specific bit-pattern, size, or ordering with respect to other
        messages difficult. By routing through numerous Mixes in the network, determining
        who is talking to whom becomes even more difficult. Onion Routing's network of
        core onion-routers (Mixes) is distributed, fault-tolerant, and under the control
        of multiple administrative domains, so no single onion-router can bring down the
        network or compromise a user's privacy, and cooperation between compromised
        onion-routers is thereby confounded}, 
  url = {http://www.onion-router.net/Publications/CACM-1999}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/onionrouting.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Golle01incentivesfor
@conference{Golle01incentivesfor,
  author       = {Philippe Golle and Kevin Leyton-Brown and Ilya Mironov and Mark Lillibridge},
  title        = {Incentives for Sharing in Peer-to-Peer Networks},
  booktitle    = {EC'01: Proceedings of the Second International Workshop on Electronic
        Commerce},
  organization = {Springer-Verlag},
  year         = {2001},
  address      = {London, UK},
  pages        = {75--87},
  publisher    = {Springer-Verlag},
  abstract     = {We consider the free-rider problem in peer-to-peer file sharing networks such
        as Napster: that individual users are provided with no incentive for adding value
        to the network. We examine the design implications of the assumption that users
        will selfishly act to maximize their own rewards, by constructing a formal game
        theoretic model of the system and analyzing equilibria of user strategies under
        several novel payment mechanisms. We support and extend this work with results
        from experiments with a multi-agent reinforcement learning model},
  www_section  = {free-riding, incentives},
  isbn         = {3-540-42878-X},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.23.9004.pdf},
}
Golle:sp2006
@conference{Golle:sp2006,
  title = {Deterring Voluntary Trace Disclosure in Re-encryption Mix Networks}, 
  author = {Philippe Golle and XiaoFeng Wang and Jakobsson, Markus and Alex Tsow}, 
  booktitle = {Proceedings of the 2006 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE CS}, 
  year = {2006}, 
  month = {May}, 
  address = {Oakland, CA}, 
  pages = {121--131}, 
  publisher = {IEEE CS}, 
  abstract = {Mix-networks, a family of anonymous messaging protocols, have been engineered
        to withstand a wide range of theoretical internal and external adversaries. An
        undetectable insider threat{\textemdash}voluntary partial trace disclosures by
        server administrators{\textemdash}remains a troubling source of vulnerability. An
        administrator's cooperation could be the result of coercion, bribery, or a simple
        change of interests. While eliminating this insider threat is impossible, it is
        feasible to deter such unauthorized disclosures by bundling them with additional
        penalties. We abstract these costs with collateral keys, which grant access to
        customizable resources. This article introduces the notion of trace-deterring
        mix-networks, which encode collateral keys for every server-node into every
        end-to-end message trace. The network reveals no keying material when the
        input-to-output transitions of individual servers remain secret. Two permutation
        strategies for encoding key information into traces, mix-and-flip and
        all-or-nothing, are presented. We analyze their trade-offs with respect to
        computational efficiency, anonymity sets, and colluding message senders. Our
        techniques have sufficiently low overhead for deployment in large-scale
        elections, thereby providing a sort of publicly verifiable privacy guarantee}, 
  www_section = {anonymity measurement, privacy, re-encryption}, 
  doi = {10.1145/1698750.1698758}, 
  url = {http://portal.acm.org/citation.cfm?id=1698750.1698758}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Golle-sp2006.pdf}, 
}
GolleJakobssonJuelsSyverson:universal04
@conference{GolleJakobssonJuelsSyverson:universal04,
  author       = {Philippe Golle and Jakobsson, Markus and Ari Juels and Paul Syverson},
  title        = {Universal Re-Encryption for Mixnets},
  booktitle    = {Proceedings of the 2004 RSA Conference, Cryptographer's track},
  organization = {Springer Berlin / Heidelberg},
  year         = {2004},
  month        = {February},
  address      = {San Francisco, USA},
  publisher    = {Springer Berlin / Heidelberg},
  abstract     = {We introduce a new cryptographic technique that we call universal
        re-encryption. A conventional cryptosystem that permits re-encryption, such as
        ElGamal, does so only for a player with knowledge of the public key corresponding
        to a given ciphertext. In contrast, universal re-encryption can be done without
        knowledge of public keys. We propose an asymmetric cryptosystem with universal
        re-encryption that is half as efficient as standard ElGamal in terms of
        computation and storage. While technically and conceptually simple, universal
        re-encryption leads to new types of functionality in mixnet architectures.
        Conventional mixnets are often called upon to enable players to communicate with
        one another through channels that are externally anonymous, i.e., that hide
        information permitting traffic-analysis. Universal re-encryption lets us
        construct a mixnet of this kind in which servers hold no public or private keying
        material, and may therefore dispense with the cumbersome requirements of key
        generation, key distribution, and private-key management. We describe two
        practical mixnet constructions, one involving asymmetric input ciphertexts, and
        another with hybrid-ciphertext inputs},
  www_section  = {anonymity, private channels, universal re-encryption},
  isbn         = {978-3-540-20996-6},
  doi          = {10.1007/b95630},
  url          = {http://www.springerlink.com/content/1fu5qrb1a2kfe7f9/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/GolleJakobssonJuelsSyverson-universal04.pdf},
}
Goyal:2006:AEF:1180405.1180418
@conference{Goyal:2006:AEF:1180405.1180418,
  title = {Attribute-based encryption for fine-grained access control of encrypted data}, 
  author = {Goyal, Vipul and Pandey, Omkant and Amit Sahai and Waters, Brent}, 
  booktitle = {CCS'06--Proceedings of the 13th ACM Conference on Computer and
        Communications Security}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {October}, 
  address = {Alexandria, VA, USA}, 
  pages = {89--98}, 
  publisher = {ACM}, 
  series = {CCS '06}, 
  abstract = {As more sensitive data is shared and stored by third-party sites on the
        Internet, there will be a need to encrypt data stored at these sites. One
        drawback of encrypting data, is that it can be selectively shared only at a
        coarse-grained level (i.e., giving another party your private key). We develop a
        new cryptosystem for fine-grained sharing of encrypted data that we call
        Key-Policy Attribute-Based Encryption (KP-ABE). In our cryptosystem, ciphertexts
        are labeled with sets of attributes and private keys are associated with access
        structures that control which ciphertexts a user is able to decrypt. We
        demonstrate the applicability of our construction to sharing of audit-log
        information and broadcast encryption. Our construction supports delegation of
        private keys which subsumes Hierarchical Identity-Based Encryption (HIBE)}, 
  www_section = {access control, attribute-based encryption, audit logs, broadcast
        encryption, delegation, hierarchical identity-based encryption}, 
  isbn = {1-59593-518-5}, 
  doi = {10.1145/1180405.1180418}, 
  url = {http://doi.acm.org/10.1145/1180405.1180418}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2706\%20-\%20Attributed-based\%20encryption\%20for\%20fine-grained\%20access\%20control\%20of\%20encrypted\%20data.pdf},
}
Grolimund06havelaar:a
@conference{Grolimund06havelaar:a,
  title = {Havelaar: A Robust and Efficient Reputation System for Active Peer-to-Peer
        Systems}, 
  author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}, 
  booktitle = {NetEcon'06. 1st Workshop on the Economics of Networked Systems Ann Arbor}, 
  year = {2006}, 
  month = {June}, 
  address = {Ann Arbor, Michigan}, 
  abstract = {Peer-to-peer (p2p) systems have the potential to harness huge amounts of
        resources. Unfortunately, however, it has been shown that most of today's p2p
        networks suffer from a large fraction of free-riders, which mostly consume
        resources without contributing much to the system themselves. This results in an
        overall performance degradation. One particularly interesting resource is
        bandwidth. Thereby, a service differentiation approach seems appropriate, where
        peers contributing higher upload bandwidth are rewarded with higher download
        bandwidth in return. Keeping track of the contribution of each peer in an open,
        decentralized environment, however, is not trivial; many systems which have been
        proposed are susceptible to false reports. Besides being prone to attacks, some
        solutions have a large communication and computation overhead, which can even be
        linear in the number of transactions{\textemdash}an unacceptable burden in
        practical and active systems. In this paper, we propose a reputation system which
        overcomes this scaling problem. Our analytical and simulation results are
        promising, indicating that the mechanism is accurate and efficient, especially
        when applied to systems where there are lots of transactions (e.g., due to
        erasure coding)}, 
  www_section = {free-riding, havelaar, P2P, peer-to-peer networking, performance
        degradation, reputation system}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2706\%20-\%20Harvelaar.pdf},
}
Grolimund:2006:CFT:1173705.1174355
@conference{Grolimund:2006:CFT:1173705.1174355,
  title = {Cryptree: A Folder Tree Structure for Cryptographic File Systems}, 
  author = {Dominik Grolimund and Luzius Meisser and Stefan Schmid and Roger Wattenhofer}, 
  booktitle = {SRDS'06--Proceedings of the 25th IEEE Symposium on Reliable Distributed
        Systems}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  month = {October}, 
  address = {Leeds, UK}, 
  pages = {189--198}, 
  publisher = {IEEE Computer Society}, 
  abstract = {We present Cryptree, a cryptographic tree structure which facilitates access
        control in file systems operating on untrusted storage. Cryptree leverages the
        file system's folder hierarchy to achieve efficient and intuitive, yet simple,
        access control. The highlights are its ability to recursively grant access to a
        folder and all its subfolders in constant time, the dynamic inheritance of access
        rights which inherently prevents scattering of access rights, and the possibility
        to grant someone access to a file or folder without revealing the identities of
        other accessors. To reason about and to visualize Cryptree, we introduce the
        notion of cryptographic links. We describe the Cryptrees we have used to enforce
        read and write access in our own file system. Finally, we measure the performance
        of the Cryptree and compare it to other approaches}, 
  www_section = {cryptographic tree structure, cryptree, hierarchy, untrusted storage}, 
  isbn = {0-7695-2677-2}, 
  doi = {10.1109/SRDS.2006.15}, 
  url = {http://dl.acm.org/citation.cfm?id=1173705.1174355}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRDS\%2706\%20-\%20Cryptree.pdf},
}
Gulcu96mixingemail
@conference{Gulcu96mixingemail,
  title = {Mixing Email with {Babel}}, 
  author = {Ceki Gulcu and Gene Tsudik}, 
  booktitle = {Symposium on Network and Distributed System Security}, 
  year = {1996}, 
  pages = {2--16}, 
  abstract = {Increasingly large numbers of people communicate today via electronic means
        such as email or news forums. One of the basic properties of the current
        electronic communication means is the identification of the end-points. However,
        at times it is desirable or even critical to hide the identity and/or whereabouts
        of the end-points (e.g., human users) involved. This paper discusses the goals
        and desired properties of anonymous email in general and introduces the design
        and salient features of Babel anonymous remailer. Babel allows email users to
        converse electronically while remaining anonymous with respect to each other and
        to other-- even hostile--parties. A range of attacks and corresponding
        countermeasures is considered. An attempt is made to formalize and quantify
        certain dimensions of anonymity and untraceable communication}, 
  url = {http://eprints.kfupm.edu.sa/50994/1/50994.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/babel.pdf}, 
  www_section = {Unsorted}, 
}
Gummadi:2003:IDR:863955.863998
@conference{Gummadi:2003:IDR:863955.863998,
  title = {The impact of DHT routing geometry on resilience and proximity}, 
  author = {Krishna Phani Gummadi and Gummadi, Ramakrishna and Steven D. Gribble and Sylvia
        Paul Ratnasamy and S Shenker and Ion Stoica}, 
  booktitle = {SIGCOMM '03--Proceedings of the 2003 Conference on Applications,
        Technologies, Architectures, and Protocols for Computer Communications}, 
  organization = {ACM}, 
  year = {2003}, 
  month = {August}, 
  address = {Karlsruhe, Germany}, 
  pages = {381--394}, 
  publisher = {ACM}, 
  series = {SIGCOMM '03}, 
  abstract = {The various proposed DHT routing algorithms embody several different
        underlying routing geometries. These geometries include hypercubes, rings,
        tree-like structures, and butterfly networks. In this paper we focus on how these
        basic geometric approaches affect the resilience and proximity properties of
        DHTs. One factor that distinguishes these geometries is the degree of flexibility
        they provide in the selection of neighbors and routes. Flexibility is an
        important factor in achieving good static resilience and effective proximity
        neighbor and route selection. Our basic finding is that, despite our initial
        preference for more complex geometries, the ring geometry allows the greatest
        flexibility, and hence achieves the best resilience and proximity performance}, 
  www_section = {distributed hash table, flexibility, routing geometry}, 
  isbn = {1-58113-735-4}, 
  doi = {10.1145/863955.863998}, 
  url = {http://doi.acm.org/10.1145/863955.863998}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2703\%20-\%20The\%20impact\%20of\%20DHT\%20routing\%20geometry\%20on\%20resilience\%20and\%20proximity.pdf},
}
Guo:2005:MAM:1251086.1251090
@conference{Guo:2005:MAM:1251086.1251090,
  author       = {Guo, Lei and Chen, Songqing and Xiao, Zhen and Tan, Enhua and Ding, Xiaoning
        and Zhang, Xiaodong},
  title        = {Measurements, analysis, and modeling of BitTorrent-like systems},
  booktitle    = {IMC'05. Proceedings of the 5th ACM SIGCOMM Conference on Internet
        Measurement},
  organization = {USENIX Association},
  year         = {2005},
  month        = {October},
  address      = {Berkeley, CA, USA},
  pages        = {4--4},
  publisher    = {USENIX Association},
  series       = {IMC '05},
  abstract     = {Existing studies on BitTorrent systems are single-torrent based, while more
        than 85\% of all peers participate in multiple torrents according to our trace
        analysis. In addition, these studies are not sufficiently insightful and accurate
        even for single-torrent models, due to some unrealistic assumptions. Our analysis
        of representative Bit-Torrent traffic provides several new findings regarding the
        limitations of BitTorrent systems: (1) Due to the exponentially decreasing peer
        arrival rate in reality, service availability in such systems becomes poor
        quickly, after which it is difficult for the file to be located and downloaded.
        (2) Client performance in the BitTorrent-like systems is unstable, and fluctuates
        widely with the peer population. (3) Existing systems could provide unfair
        services to peers, where peers with high downloading speed tend to download more
        and upload less. In this paper, we study these limitations on torrent evolution
        in realistic environments. Motivated by the analysis and modeling results, we
        further build a graph based multi-torrent model to study inter-torrent
        collaboration. Our model quantitatively provides strong motivation for
        inter-torrent collaboration instead of directly stimulating seeds to stay longer.
        We also discuss a system design to show the feasibility of multi-torrent
        collaboration},
  www_section  = {bittorrent system, intertorrent collaboration, multi-torrent
        collaboration, multiple torrents},
  url          = {http://www.usenix.org/events/imc05/tech/full_papers/guo/guo_html/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2705\%20-\%20Measurement\%2C\%20analysis\%20and\%20modeling\%20of\%20BitTorrent-like\%20systems.pdf},
}
Gupta03kelips:building
@conference{Gupta03kelips:building,
  author      = {Indranil Gupta and Kenneth P. Birman and Prakash Linga and Alan Demers and
        Robbert Van Renesse},
  title       = {Kelips: Building an efficient and stable P2P DHT through increased memory and
        background overhead},
  booktitle   = {Proceedings of the 2nd International Workshop on Peer-to-Peer Systems (IPTPS
        '03)},
  year        = {2003},
  abstract    = {A peer-to-peer (p2p) distributed hash table (DHT) system allows hosts to join
        and fail silently (or leave), as well as to insert and retrieve files (objects).
        This paper explores a new point in design space in which increased memory usage
        and constant background communication overheads are tolerated to reduce file
        lookup times and increase stability to failures and churn. Our system, called
        Kelips, uses peer-to-peer gossip to partially replicate file index information.
        In Kelips, (a) under normal conditions, file lookups are resolved with O(1) time
        and complexity (i.e., independent of system size), and (b) membership changes
        (e.g., even when a large number of nodes fail) are detected and disseminated to
        the system quickly. Per-node memory requirements are small in medium-sized
        systems. When there are failures, lookup success is ensured through query
        rerouting. Kelips achieves load balancing comparable to existing systems.
        Locality is supported by using topologically aware gossip mechanisms. Initial
        results of an ongoing experimental study are also discussed},
  www_section = {distributed hash table, P2P},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.3464},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.3464.pdf},
}
Gupta:2004:RMF:1018440.1021942
@conference{Gupta:2004:RMF:1018440.1021942,
  title = {Reputation Management Framework and Its Use as Currency in Large-Scale
        Peer-to-Peer Networks}, 
  author = {Gupta, Rohit and Somani, Arun K.}, 
  booktitle = {P2P'04. Proceedings of the 4th International Conference on Peer-to-Peer
        Computing}, 
  organization = {IEEE Computer Society}, 
  year = {2004}, 
  month = {August}, 
  address = {Zurich, Switzerland}, 
  pages = {124--132}, 
  publisher = {IEEE Computer Society}, 
  series = {P2P '04}, 
  abstract = {In this paper we propose a reputation management framework for large-scale
        peer-to-peer (P2P) networks, wherein all nodes are assumed to behave selfishly.
        The proposed framework has several advantages. It enables a form of virtual
        currency, such that the reputation of nodes is a measure of their wealth. The
        framework is scalable and provides protection against attacks by malicious nodes.
        The above features are achieved by developing trusted communities of nodes whose
        members trust each other and cooperate to deal with the problem of nodes'
        selfishness and possible maliciousness}, 
  www_section = {framework, P2P, peer-to-peer networking, reputation management}, 
  isbn = {0-7695-2156-8}, 
  doi = {10.1109/P2P.2004.44}, 
  url = {http://dx.doi.org/10.1109/P2P.2004.44}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2704\%20-\%20Reputation\%20management\%20framework.pdf},
}
Halevy:2002:LBE:646767.704291
@conference{Halevy:2002:LBE:646767.704291,
  title = {The LSD Broadcast Encryption Scheme}, 
  author = {Halevy, Dani and Shamir, Adi}, 
  booktitle = {CRYPTO'02--Proceedings of the 22nd Annual International Cryptology
        Conference on Advances in Cryptology}, 
  organization = {Springer-Verlag}, 
  year = {2002}, 
  month = {August}, 
  address = {Santa Barbara, CA, USA}, 
  pages = {47--60}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Broadcast Encryption schemes enable a center to broadcast encrypted programs
        so that only designated subsets of users can decrypt each program. The stateless
        variant of this problem provides each user with a fixed set of keys which is
        never updated. The best scheme published so far for this problem is the "subset
        difference" (SD) technique of Naor, Naor, and Lotspiech, in which each one of the
        n users is initially given $O(\log^2(n))$ symmetric encryption keys. This allows
        the broadcaster to define at a later stage any subset of up to r users as
        "revoked", and to make the program accessible only to their complement by sending
        O(r) short messages before the encrypted program, and asking each user to perform
        an O(log(n)) computation. In this paper we describe the "Layered Subset
        Difference" (LSD) technique, which achieves the same goal with
        $O(\log^{1+\epsilon}(n))$ keys, O(r) messages, and O(log(n)) computation. This
        reduces the number of keys given to each user by almost a square root factor
        without affecting the other parameters. In addition, we show how to use the same
        LSD keys in order to address any subset defined by a nested combination of
        inclusion and exclusion conditions with a number of messages which is
        proportional to the complexity of the description rather than to the size of the
        subset. The LSD scheme is truly practical, and makes it possible to broadcast an
        unlimited number of programs to 256,000,000 possible customers by giving each new
        customer a smart card with one kilobyte of tamper-resistant memory. It is then
        possible to address any subset defined by t nested inclusion and exclusion
        conditions by sending less than 4t short messages, and the scheme remains secure
        even if all the other users form an adversarial coalition}, 
  www_section = {broadcast encryption scheme, encryption, LSD}, 
  isbn = {3-540-44050-X}, 
  url = {http://dl.acm.org/citation.cfm?id=646767.704291}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CRYPTO\%2702\%20-\%20The\%20LSD\%20broadcast\%20encryption\%20scheme.pdf},
}
Hall01onalgorithms
@booklet{Hall01onalgorithms,
  author      = {Joseph Hall and Jason D. Hartline and Anna R. Karlin and Jared Saia and John
        Wilkes},
  title       = {On Algorithms for Efficient Data Migration},
  year        = {2001},
  abstract    = {The data migration problem is the problem of computing an efficient plan for
        moving data stored on devices in a network from one configuration to another.
        Load balancing or changing usage patterns could necessitate such a rearrangement
        of data. In this paper, we consider the case where the objects are fixed-size and
        the network is complete. The direct migration problem is closely related to
        edge-coloring. However, because there are space constraints on the devices, the
        problem is more complex. Our main results are polynomial time algorithms for
        finding a near-optimal migration plan in the presence of space constraints when a
        certain number of additional nodes is available as temporary storage, and a
        3/2-approximation for the case where data must be migrated directly to its
        destination},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.26.1365\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.26.1365.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
HanLLHP05
@conference{HanLLHP05,
  author       = {Jinsong Han and Yunhao Liu and Li Lu and Lei Hu and Abhishek Patil},
  title        = {A Random Walk Based Anonymous Peer-to-Peer Protocol Design},
  booktitle    = {Proceedings of ICCNMC},
  organization = {Springer Berlin / Heidelberg},
  year         = {2005},
  pages        = {143--152},
  publisher    = {Springer Berlin / Heidelberg},
  abstract     = {Anonymity has been one of the most challenging issues in Ad Hoc environment
        such as P2P systems. In this paper, we propose an anonymous protocol called
        Random Walk based Anonymous Protocol (RWAP), in decentralized P2P systems. We
        evaluate RWAP by comprehensive trace driven simulations. Results show that RWAP
        significantly reduces traffic cost and encryption overhead compared with existing
        approaches},
  www_section  = {anonymity, P2P, RWAP},
  isbn         = {978-3-540-28102-3},
  doi          = {10.1007/11534310},
  url          = {http://www.springerlink.com/content/0642hvq80b27vv1f/},
}
Harren:2002:CQD:646334.687945
@conference{Harren:2002:CQD:646334.687945,
  title = {Complex Queries in {DHT}-based Peer-to-Peer Networks},
  author = {Harren, Matthew and Hellerstein, Joseph M. and Huebsch, Ryan and Boon Thau Loo
        and Shenker, Scott and Ion Stoica},
  booktitle = {IPTPS'01--Revised Papers from the First International Workshop on
        Peer-to-Peer Systems},
  organization = {Springer-Verlag},
  year = {2002},
  month = {March},
  address = {Cambridge, MA, USA},
  pages = {242--259},
  publisher = {Springer-Verlag},
  series = {IPTPS '01},
  abstract = {Recently a new generation of P2P systems, offering distributed hash table
        (DHT) functionality, have been proposed. These systems greatly improve the
        scalability and exact-match accuracy of P2P systems, but offer only the
        exact-match query facility. This paper outlines a research agenda for building
        complex query facilities on top of these DHT-based P2P systems. We describe the
        issues involved and outline our research plan and current status},
  www_section = {distributed hash table},
  isbn = {3-540-44179-4},
  url = {http://dl.acm.org/citation.cfm?id=646334.687945},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2701\%20-\%20Complex\%20queries\%20in\%20DHT-based\%20p2p\%20networks.pdf},
}
Hartline:2008:OMD:1374376.1374390
@conference{Hartline:2008:OMD:1374376.1374390,
  title = {Optimal mechanism design and money burning},
  author = {Jason D. Hartline and Roughgarden, Tim},
  booktitle = {STOC'08. Proceedings of the 40th annual ACM Symposium on Theory of
        Computing},
  organization = {ACM},
  year = {2008},
  month = {May},
  address = {Victoria, British Columbia, Canada},
  pages = {75--84},
  publisher = {ACM},
  series = {STOC '08},
  abstract = {Mechanism design is now a standard tool in computer science for aligning the
        incentives of self-interested agents with the objectives of a system designer.
        There is, however, a fundamental disconnect between the traditional application
        domains of mechanism design (such as auctions) and those arising in computer
        science (such as networks): while monetary "transfers" (i.e., payments) are
        essential for most of the known positive results in mechanism design, they are
        undesirable or even technologically infeasible in many computer systems.
        Classical impossibility results imply that the reach of mechanisms without
        transfers is severely limited. Computer systems typically do have the ability to
        reduce service quality--routing systems can drop or delay traffic, scheduling
        protocols can delay the release of jobs, and computational payment schemes can
        require computational payments from users (e.g., in spam-fighting systems).
        Service degradation is tantamount to requiring that users "burn money", and such
        "payments" can be used to influence the preferences of the agents at a cost of
        degrading the social surplus. We develop a framework for the design and analysis
        of "money-burning mechanisms" to maximize the residual surplus-the total value of
        the chosen outcome minus the payments required. Our primary contributions are the
        following. * We define a general template for prior-free optimal mechanism design
        that explicitly connects Bayesian optimal mechanism design, the dominant paradigm
        in economics, with worst-case analysis. In particular, we establish a general and
        principled way to identify appropriate performance benchmarks in prior-free
        mechanism design. * For general single-parameter agent settings, we characterize
        the Bayesian optimal money-burning mechanism. * For multi-unit auctions, we
        design a near-optimal prior-free money-burning mechanism: for every valuation
        profile, its expected residual surplus is within a constant factor of our
        benchmark, the residual surplus of the best Bayesian optimal mechanism for this
        profile. * For multi-unit auctions, we quantify the benefit of general transfers
        over money-burning: optimal money-burning mechanisms always obtain a logarithmic
        fraction of the full social surplus, and this bound is tight},
  www_section = {mechanism design, money burning, optimal mechanism design},
  isbn = {978-1-60558-047-0},
  doi = {10.1145/1374376.1374390},
  url = {http://doi.acm.org/10.1145/1374376.1374390},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2708\%20-\%20Optimal\%20mechanism\%20design\%20and\%20money\%20burning.pdf},
}
Harvey:2003:SSO:1251460.1251469
@conference{Harvey:2003:SSO:1251460.1251469,
  title = {{SkipNet}: a scalable overlay network with practical locality properties},
  author = {Harvey, Nicholas J. A. and Michael B. Jones and Stefan Saroiu and Marvin
        Theimer and Wolman, Alec},
  booktitle = {Proceedings of the 4th conference on USENIX Symposium on Internet
        Technologies and Systems--Volume 4},
  organization = {USENIX Association},
  year = {2003},
  address = {Berkeley, CA, USA},
  pages = {9--9},
  publisher = {USENIX Association},
  series = {USITS'03},
  www_section = {distributed hash table, range queries, SkipNet},
  url = {http://dl.acm.org/citation.cfm?id=1251460.1251469},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/harvey.pdf},
}
Heimbigner00adaptingpublish/subscribe
@conference{Heimbigner00adaptingpublish/subscribe,
  title = {Adapting Publish/Subscribe Middleware to Achieve Gnutella-like Functionality},
  author = {Dennis Heimbigner},
  booktitle = {Proceedings of SAC},
  year = {2000},
  pages = {176--181},
  abstract = {Gnutella represents a new wave of peer-to-peer applications providing
        distributed discovery and sharing of resources across the Internet. Gnutella is
        distinguished by its support for anonymity and by its decentralized architecture.
        The current Gnutella architecture and protocol have numerous flaws with respect
        to efficiency, anonymity, and vulnerability to malicious actions. An alternative
        design is described that provides Gnutella-like functionality but removes or
        mitigates many of Gnutella's flaws. This design, referred to as Query/Advertise
        (Q/A) is based upon a scalable publish/subscribe middleware system called Sienab.
        A prototype implementation of Q/A is described. The relative benefits of this
        approach are discussed, and a number of open research problems are identified
        with respect to Q/A systems},
  url = {http://serl.cs.colorado.edu/~serl/papers/CU-CS-909-00.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CU-CS-909-00.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Helmy04efficientresource
@incollection{Helmy04efficientresource,
  title = {Efficient Resource Discovery in Wireless AdHoc Networks: Contacts Do Help},
  author = {Ahmed Helmy},
  booktitle = {Resource Management in Wireless Networking},
  organization = {Kluwer Academic Publishers},
  year = {2004},
  publisher = {Kluwer Academic Publishers},
  abstract = {The resource discovery problem poses new challenges in infrastructure-less
        wireless networks. Due to the highly dynamic nature of these networks and their
        bandwidth and energy constraints, there is a pressing need for energy-aware
        communicationefficient resource discovery protocols. This chapter provides an
        overview of several approaches to resource discovery, discussing their
        suitability for classes of wireless networks. The approaches discussed in this
        chapter include flooding-based approaches, hierarchical cluster-based and
        dominating set schemes, and hybrid loose hierarchy architectures. Furthermore,
        the chapter provides a detailed case study on the design, evaluation and analysis
        of an energy-efficient resource discovery protocol based on hybrid loose
        hierarchy and utilizing the concept of {\textquoteleft}contacts'},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.76.9310},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.76.9310.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Heydon01thevesta
@booklet{Heydon01thevesta,
  title = {The Vesta Approach to Software Configuration Management},
  author = {Allan Heydon and Roy Levin and Timothy Mann and Yuan Yu},
  year = {2001},
  abstract = {Vesta is a system for software configuration management. It stores
        collections of source files, keeps track of which versions of which files go
        together, and automates the process of building a complete software artifact from
        its component pieces. Vesta's novel approach gives it three important properties.
        First, every build is repeatable, because its component sources and build tools
        are stored immutably and immortally, and its configuration description completely
        specifies what components and tools are used and how they are put together.
        Second, every build is incremental, because results of previous builds are cached
        and reused. Third, every build is consistent, because all build dependencies are
        automatically captured and recorded, so that a cached result from a previous
        build is reused only when doing so is certain to be correct. In addition, Vesta's
        flexible language for writing configuration descriptions makes it easy to
        describe large software configurations in a modular fashion and to create variant
        configurations by customizing build parameters. This paper gives a brief overview
        of Vesta, outlining Vesta's advantages over traditional tools, how those benefits
        are achieved, and the system's overall performance},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.23.7370},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SRC-RR-168.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Hildrum03asymptoticallyefficient
@conference{Hildrum03asymptoticallyefficient,
  title = {Asymptotically Efficient Approaches to Fault-Tolerance in Peer-to-Peer},
  author = {Hildrum, Kirsten and John Kubiatowicz},
  booktitle = {Proceedings of DISC},
  year = {2003},
  pages = {321--336},
  abstract = {In this paper, we show that two peer-to-peer systems, Pastry [13] and
        Tapestry [17] can be made tolerant to certain classes of failures and a limited
        class of attacks. These systems are said to operate properly if they can find the
        closest node matching a requested ID. The system must also be able to dynamically
        construct the necessary routing information when new nodes enter or the network
        changes. We show that with an additional factor of storage overhead and
        communication overhead, they can continue to achieve both of these goals in the
        presence of a constant fraction nodes that do not obey the protocol. Our
        techniques are similar in spirit to those of Saia et al. [14] and Naor and Wieder
        [10]. Some simple simulations show that these techniques are useful even with
        constant overhead},
  www_section = {fault-tolerance, P2P},
  isbn = {978-3-540-20184-7},
  doi = {10.1007/b13831},
  url = {http://www.springerlink.com/content/7emt7u01cvbb6bu6/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.334.pdf},
}
Hildrum:CSD-02-1178
@techreport{Hildrum:CSD-02-1178,
  title = {Distributed Data Location in a Dynamic Network},
  author = {Hildrum, Kirsten and John Kubiatowicz and Rao, Satish and Ben Y. Zhao},
  institution = {EECS Department, University of California, Berkeley},
  number = {UCB/CSD-02-1178},
  year = {2002},
  month = {April},
  abstract = {Modern networking applications replicate data and services widely, leading to
        a need for location-independent routing -- the ability to route queries directly
        to objects using names that are independent of the objects' physical locations.
        Two important properties of a routing infrastructure are routing locality and
        rapid adaptation to arriving and departing nodes. We show how these two
        properties can be achieved with an efficient solution to the nearest-neighbor
        problem. We present a new distributed algorithm that can solve the
        nearest-neighbor problem for a restricted metric space. We describe our solution
        in the context of Tapestry, an overlay network infrastructure that employs
        techniques proposed by Plaxton, Rajaraman, and Richa},
  url = {http://www.eecs.berkeley.edu/Pubs/TechRpts/2002/5214.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-02-1178.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Hof04SecureDistributedServiceDirectory
@conference{Hof04SecureDistributedServiceDirectory,
  title = {Design of a Secure Distributed Service Directory for Wireless Sensornetworks},
  author = {Hans-Joachim Hof and Erik-Oliver Blass and Thomas Fuhrmann and Martina
        Zitterbart},
  booktitle = {Proceedings of the First European Workshop on Wireless Sensor Networks},
  year = {2004},
  address = {Berlin, Germany},
  abstract = {Sensor networks consist of a potentially huge number of very small and
        resource limited self-organizing devices. This paper presents the design of a
        general distributed service directory architecture for sensor networks which
        especially focuses on the security issues in sensor networks. It ensures secure
        construction and maintenance of the underlying storage structure, a Content
        Addressable Network. It also considers integrity of the distributed service
        directory and secures communication between service provider and inquirer using
        self-certifying path names. Key area of application of this architecture are
        gradually extendable sensor networks where sensors and actuators jointly perform
        various user defined tasks, e.g., in the field of an office environment},
  www_section = {sensor networks},
  isbn = {978-3-540-20825-9},
  doi = {10.1007/978-3-540-24606-0_19},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scan.pdf},
}
HotOrNot
@conference{HotOrNot,
  title        = {Hot or Not: Revealing Hidden Services by their Clock Skew},
  author       = {Steven J. Murdoch},
  booktitle    = {Proceedings of CCS 2006},
  organization = {ACM New York, NY, USA},
  publisher    = {ACM New York, NY, USA},
  year         = {2006},
  month        = {October},
  abstract     = {Location-hidden services, as offered by anonymity systems such as Tor, allow
        servers to be operated under a pseudonym. As Tor is an overlay network, servers
        hosting hidden services are accessible both directly and over the anonymous
        channel. Traffic patterns through one channel have observable effects on the
        other, thus allowing a service's pseudonymous identity and IP address to be
        linked. One proposed solution to this vulnerability is for Tor nodes to provide
        fixed quality of service to each connection, regardless of other traffic, thus
        reducing capacity but resisting such interference attacks. However, even if each
        connection does not influence the others, total throughput would still affect the
        load on the CPU, and thus its heat output. Unfortunately for anonymity, the
        result of temperature on clock skew can be remotely detected through observing
        timestamps. This attack works because existing abstract models of
        anonymity-network nodes do not take into account the inevitable imperfections of
        the hardware they run on. Furthermore, we suggest the same technique could be
        exploited as a classical covert channel and can even provide geolocation},
  www_section  = {anonymity, clock skew, covert channels, fingerprinting, Tor},
  isbn         = {1-59593-518-5},
  doi          = {10.1145/1180405.1180410},
  url          = {http://portal.acm.org/citation.cfm?id=1180410},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/HotOrNot.pdf},
}
Hubaux01thequest
@booklet{Hubaux01thequest,
  title = {The Quest for Security in Mobile Ad Hoc Networks},
  author = {Jean-Pierre Hubaux and Levente Butty{\'a}n and Srdan Capkun},
  year = {2001},
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.130.6088\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.130.6088.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
Huebsch:2003:QIP:1315451.1315480
@conference{Huebsch:2003:QIP:1315451.1315480,
  title = {Querying the {Internet} with {PIER}},
  author = {Huebsch, Ryan and Hellerstein, Joseph M. and Lanham, Nick and Boon Thau Loo and
        Shenker, Scott and Ion Stoica},
  booktitle = {Proceedings of the 29th international conference on Very large data
        bases--Volume 29},
  organization = {VLDB Endowment},
  year = {2003},
  pages = {321--332},
  publisher = {VLDB Endowment},
  series = {VLDB '03},
  www_section = {distributed hash table, PIER, range queries},
  isbn = {0-12-722442-4},
  url = {http://dl.acm.org/citation.cfm?id=1315451.1315480},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/vldb03-pier.pdf},
}
Hurler_automaticcontext
@booklet{Hurler_automaticcontext,
  title = {Automatic Context Integration for Group Aware Environments},
  author = {Bernhard Hurler and Leo Petrak and Thomas Fuhrmann and Oliver Brand and Martina
        Zitterbart},
  year = {2003},
  abstract = {Tele-collaboration is a valuable tool that can connect learners at different
        sites and help them benefit from their respective competences. Albeit many
        e-learning applications provide a high level of technical sophistication, such
        tools typically fall short of reflecting the learners ' full context, e.g., their
        presence and awareness. Hence, these applications cause many disturbances in the
        social interaction of the learners. This paper describes mechanisms to improve
        the group awareness in elearning environments with the help of automatic
        integration of such context information from the physical world. This information
        is gathered by different embedded sensors in various objects, e.g., a coffee mug
        or an office chair. This paper also describes first results of the integration of
        these sensors into an existing CSCW/CSCL framework},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.1450},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hurler03context.pdf},
}
%%%%% ERROR: Missing field
% www_section = {?????},
IPTPS05
@conference{IPTPS05,
  title = {High Availability in {DHTs}: Erasure Coding vs. Replication},
  author = {Rodrigues, Rodrigo and Barbara Liskov},
  booktitle = {IPTPS'05--Proceedings of the 4th International Workshop in Peer-to-Peer
        Systems},
  organization = {Springer},
  volume = {3640},
  year = {2005},
  month = {February},
  address = {Ithaca, New York},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {High availability in peer-to-peer DHTs requires data redundancy. This paper
        compares two popular redundancy schemes: replication and erasure coding. Unlike
        previous comparisons, we take the characteristics of the nodes that comprise the
        overlay into account, and conclude that in some cases the benefits from coding
        are limited, and may not be worth its disadvantages},
  www_section = {distributed hash table, erasure coding, high availability, peer-to-peer
        networking, redundancy, Replication},
  doi = {10.1007/11558989_21},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2705\%20-\%20High\%20availability\%20in\%20DHTs\%3A\%20erasure\%20coding\%20vs.\%20replication.pdf},
}
ISDN-mixes
@conference{ISDN-mixes,
  title = {{ISDN}-mixes: Untraceable communication with very small bandwidth overhead},
  author = {Andreas Pfitzmann and Birgit Pfitzmann and Michael Waidner},
  booktitle = {Proceedings of the GI/ITG Conference on Communication in Distributed
        Systems},
  organization = {Springer-Verlag London, UK},
  year = {1991},
  month = {February},
  pages = {451--463},
  publisher = {Springer-Verlag London, UK},
  abstract = {Untraceable communication for services like telephony is often considered
        infeasible in the near future because of bandwidth limitations. We present a
        technique, called ISDN-MIXes, which shows that this is not the case. As little
        changes as possible are made to the narrowband-ISDN planned by the PTTs. In
        particular, we assume the same subscriber lines with the same bit rate, and the
        same long-distance network between local exchanges, and we offer the same
        services. ISDN-MIXes are a combination of a new variant of CHAUM's MIXes, dummy
        traffic on the subscriber lines (where this needs no additional bandwidth), and
        broadcast of incoming-call messages in the subscriber-area},
  isbn = {3-540-53721-X},
  url = {http://portal.acm.org/citation.cfm?id=645662.664536},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.43.4892.pdf},
  www_section = {Unsorted},
}
Iii_keylessjam
@booklet{Iii_keylessjam,
  title       = {Keyless Jam Resistance},
  author      = {Leemon C. Baird and William L. Bahn and Michael D. Collins and Martin C.
        Carlisle and Sean C. Butler},
  year        = {2007},
  abstract    = {has been made resistant to jamming by the use of a secret key that is shared
        by the sender and receiver. There are no known methods for achieving jam
        resistance without that shared key. Unfortunately, wireless communication is now
        reaching a scale and a level of importance where such secret-key systems are
        becoming impractical. For example, the civilian side of the Global Positioning
        System (GPS) cannot use a shared secret, since that secret would have to be given
        to all 6.5 billion potential users, and so would no longer be secret. So civilian
        GPS cannot currently be protected from jamming. But the FAA has stated that the
        civilian airline industry will transition to using GPS for all navigational aids,
        even during landings. A terrorist with a simple jamming system could wreak havoc
        at a major airport. No existing system can solve this problem, and the problem
        itself has not even been widely discussed. The problem of keyless jam resistance
        is important. There is a great need for a system that can broadcast messages
        without any prior secret shared between the sender and receiver. We propose the
        first system for keyless jam resistance: the BBC algorithm. We describe the
        encoding, decoding, and broadcast algorithms. We then analyze it for expected
        resistance to jamming and error rates. We show that BBC can achieve the same
        level of jam resistance as traditional spread spectrum systems, at just under
        half the bit rate, and with no shared secret. Furthermore, a hybrid system can
        achieve the same average bit rate as traditional systems},
  www_section = {GPS},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.91.8217},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.91.8217.pdf},
}
Infocom2007-SNS
@conference{Infocom2007-SNS,
  title = {Implications of Selfish Neighbor Selection in Overlay Networks},
  author = {Nikolaos Laoutaris and Georgios Smaragdakis and Azer Bestavros and Byers, John
        W.},
  booktitle = {Proceedings of IEEE INFOCOM 2007},
  year = {2007},
  month = {May},
  address = {Anchorage, AK},
  www_section = {EGOIST, game theory, routing},
  url = {http://www.cs.bu.edu/techreports/pdf/2006-019-selfish-neighbor-selection.pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2007-sns.pdf},
}
Infocom2008
@conference{Infocom2008,
  title       = {Swarming on Optimized Graphs for n-way Broadcast},
  author      = {Georgios Smaragdakis and Nikolaos Laoutaris and Pietro Michiardi and Azer
        Bestavros and Byers, John W. and Mema Roussopoulos},
  booktitle   = {Proceedings of IEEE INFOCOM 2008},
  year        = {2008},
  month       = {April},
  address     = {Phoenix, AZ},
  abstract    = {In an n-way broadcast application each one of n overlay nodes wants to push
        its own distinct large data file to all other n-1 destinations as well as
        download their respective data files. BitTorrent-like swarming protocols are
        ideal choices for handling such massive data volume transfers. The original
        BitTorrent targets one-to-many broadcasts of a single file to a very large number
        of receivers and thus, by necessity, employs an almost random overlay topology.
        n-way broadcast applications on the other hand, owing to their inherent n-squared
        nature, are realizable only in small to medium scale networks. In this paper, we
        show that we can leverage this scale constraint to construct optimized overlay
        topologies that take into consideration the end-to-end characteristics of the
        network and as a consequence deliver far superior performance compared to random
        and myopic (local) approaches. We present the Max-Min and Max- Sum peer-selection
        policies used by individual nodes to select their neighbors. The first one
        strives to maximize the available bandwidth to the slowest destination, while the
        second maximizes the aggregate output rate. We design a swarming protocol
        suitable for n-way broadcast and operate it on top of overlay graphs formed by
        nodes that employ Max-Min or Max-Sum policies. Using trace-driven simulation and
        measurements from a PlanetLab prototype implementation, we demonstrate that the
        performance of swarming on top of our constructed topologies is far superior to
        the performance of random and myopic overlays. Moreover, we show how to modify
        our swarming protocol to allow it to accommodate selfish nodes},
  www_section = {EGOIST, game theory, routing},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Infocom2008.pdf},
}
Irwin:2005:SVC:1080192.1080194
@conference{Irwin:2005:SVC:1080192.1080194,
  title = {Self-recharging virtual currency},
  author = {Irwin, David and Chase, Jeff and Grit, Laura and Yumerefendi, Aydan},
  booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of
        Peer-to-Peer Systems},
  organization = {ACM},
  year = {2005},
  month = {August},
  address = {Philadelphia, Pennsylvania, USA},
  pages = {93--98},
  publisher = {ACM},
  series = {P2PECON '05},
  abstract = {Market-based control is attractive for networked computing utilities in which
        consumers compete for shared resources (computers, storage, network bandwidth).
        This paper proposes a new self-recharging virtual currency model as a common
        medium of exchange in a computational market. The key idea is to recycle currency
        through the economy automatically while bounding the rate of spending by
        consumers. Currency budgets may be distributed among consumers according to any
        global policy; consumers spend their budgets to schedule their resource usage
        through time, but cannot hoard their currency or starve.We outline the design and
        rationale for self-recharging currency in Cereus, a system for market-based
        community resource sharing, in which participants are authenticated and sanctions
        are sufficient to discourage fraudulent behavior. Currency transactions in Cereus
        are accountable: offline third-party audits can detect and prove cheating, so
        participants may transfer and recharge currency autonomously without involvement
        of the trusted banking service},
  www_section = {market, virtual currency},
  isbn = {1-59593-026-4},
  doi = {10.1145/1080192.1080194},
  url = {http://doi.acm.org/10.1145/1080192.1080194},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Self-recharging\%20virtual\%20currency.pdf},
}
Isdal:2010:PPD:1851275.1851198
@article{Isdal:2010:PPD:1851275.1851198,
  title = {Privacy-preserving {P2P} data sharing with {OneSwarm}},
  author = {Isdal, Tomas and Piatek, Michael and Krishnamurthy, Arvind and Anderson,
        Thomas},
  journal = {SIGCOMM Comput. Commun. Rev},
  volume = {40},
  number = {4},
  year = {2010},
  address = {New York, NY, USA},
  pages = {111--122},
  publisher = {ACM},
  www_section = {anonymity, OneSwarm, p2p network},
  issn = {0146-4833},
  doi = {10.1145/1851275.1851198},
  url = {http://doi.acm.org/10.1145/1851275.1851198},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oneswarm_SIGCOMM.pdf},
}
Jannotti:2000:ORM:1251229.1251243
@conference{Jannotti:2000:ORM:1251229.1251243,
  title = {Overcast: reliable multicasting with an overlay network},
  author = {Jannotti, John and Gifford, David K. and Johnson, Kirk L. and Frans M. Kaashoek
        and O'Toole Jr., James W.},
  booktitle = {OSDI'00. Proceedings of the 4th conference on Symposium on Operating System
        Design \& Implementation},
  organization = {USENIX Association},
  year = {2000},
  month = {October},
  address = {San Diego, California, USA},
  pages = {14--14},
  publisher = {USENIX Association},
  series = {OSDI'00},
  abstract = {Overcast is an application-level multicasting system that can be
        incrementally deployed using today's Internet infrastructure. These properties
        stem from Overcast's implementation as an overlay network. An overlay network
        consists of a collection of nodes placed at strategic locations in an existing
        network fabric. These nodes implement a network abstraction on top of the network
        provided by the underlying substrate network. Overcast provides scalable and
        reliable single-source multicast using a simple protocol for building efficient
        data distribution trees that adapt to changing network conditions. To support
        fast joins, Overcast implements a new protocol for efficiently tracking the
        global status of a changing distribution tree. Results based on simulations
        confirm that Overcast provides its added functionality while performing
        competitively with IP Multicast. Simulations indicate that Overcast quickly
        builds bandwidth-efficient distribution trees that, compared to IP Multicast,
        provide 70\%-100\% of the total bandwidth possible, at a cost of somewhat less
        than twice the network load. In addition, Overcast adapts quickly to changes
        caused by the addition of new nodes or the failure of existing nodes without
        causing undue load on the multicast source},
  www_section = {overcast, overlay network},
  url = {http://dl.acm.org/citation.cfm?id=1251229.1251243},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/OSDI\%2700\%20-\%20Overcast.pdf},
}
Jelasity:2005:GAL:1082469.1082470
@article{Jelasity:2005:GAL:1082469.1082470,
  title = {Gossip-based aggregation in large dynamic networks}, 
  author = {M{\'a}rk Jelasity and Alberto Montresor and Babaoglu, Ozalp}, 
  journal = {ACM Transactions on Computer Systems}, 
  volume = {23}, 
  year = {2005}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {219--252}, 
  publisher = {ACM}, 
  abstract = {As computer networks increase in size, become more heterogeneous and span
        greater geographic distances, applications must be designed to cope with the very
        large scale, poor reliability, and often, with the extreme dynamism of the
        underlying network. Aggregation is a key functional building block for such
        applications: it refers to a set of functions that provide components of a
        distributed system access to global information including network size, average
        load, average uptime, location and description of hotspots, and so on. Local
        access to global information is often very useful, if not indispensable for
        building applications that are robust and adaptive. For example, in an industrial
        control application, some aggregate value reaching a threshold may trigger the
        execution of certain actions; a distributed storage system will want to know the
        total available free space; load-balancing protocols may benefit from knowing the
        target average load so as to minimize the load they transfer. We propose a
        gossip-based protocol for computing aggregate values over network components in a
        fully decentralized fashion. The class of aggregate functions we can compute is
        very broad and includes many useful special cases such as counting, averages,
        sums, products, and extremal values. The protocol is suitable for extremely large
        and highly dynamic systems due to its proactive structure---all nodes receive the
        aggregate value continuously, thus being able to track any changes in the system.
        The protocol is also extremely lightweight, making it suitable for many
        distributed applications including peer-to-peer and grid computing systems. We
        demonstrate the efficiency and robustness of our gossip-based protocol both
        theoretically and experimentally under a variety of scenarios including node and
        communication failures}, 
  www_section = {Gossip-based protocols, proactive aggregation}, 
  issn = {0734-2071}, 
  doi = {http://doi.acm.org/10.1145/1082469.1082470}, 
  url = {http://doi.acm.org/10.1145/1082469.1082470}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jelasity\%2C\%20Montresor\%20\%26\%20Babaoglu\%20-\%20Gossip-based\%20aggregation.pdf},
}
Jian:2008:WSP:1409540.1409546
@conference{Jian:2008:WSP:1409540.1409546,
  title = {Why Share in Peer-to-Peer Networks?}, 
  author = {Jian, Lian and MacKie-Mason, Jeffrey K.}, 
  booktitle = {EC'08. Proceedings of the 10th International Conference on Electronic
        Commerce}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {August}, 
  address = {Innsbruck, Austria}, 
  pages = {4:1--4:8}, 
  publisher = {ACM}, 
  series = {ICEC '08}, 
  abstract = {Prior theory and empirical work emphasize the enormous free-riding problem
        facing peer-to-peer (P2P) sharing networks. Nonetheless, many P2P networks
        thrive. We explore two possible explanations that do not rely on altruism or
        explicit mechanisms imposed on the network: direct and indirect private
        incentives for the provision of public goods. The direct incentive is a traffic
        redistribution effect that advantages the sharing peer. We find this incentive is
        likely insufficient to motivate equilibrium content sharing in large networks. We
        then approach P2P networks as a graph-theoretic problem and present sufficient
        conditions for sharing and free-riding to co-exist due to indirect incentives we
        call generalized reciprocity}, 
  www_section = {file-sharing, networks, P2P, peer-to-peer networking}, 
  isbn = {978-1-60558-075-3}, 
  doi = {http://doi.acm.org/10.1145/1409540.1409546}, 
  url = {http://doi.acm.org/10.1145/1409540.1409546}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2708\%20-\%20Why\%20share\%20in\%20peer-to-peer\%20networks.pdf},
}
Jiang_trustand
@incollection{Jiang_trustand,
  title = {Trust and Cooperation in Peer-to-Peer Systems}, 
  author = {Junjie Jiang and Haihuan Bai and Weinong Wang}, 
  booktitle = {Lecture Notes in Computer Science}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3032}, 
  year = {2004}, 
  pages = {371--378}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Most of the past studies on peer-to-peer systems have emphasized routing and
        lookup. The selfishness of users, which brings on the free riding problem, has
        not attracted sufficient attention from researchers. In this paper, we introduce
        a decentralized reputation-based trust model first, in which trust relationships
        could be built based on the reputation of peers. Subsequently, we use the
        iterated prisoner's dilemma to model the interactions in peer-to-peer systems and
        propose a simple incentive mechanism. By simulations, it's shown that the stable
        cooperation can emerge after limited rounds of interaction between peers by using
        the incentive mechanism}, 
  www_section = {cooperation, incentives, iterated prisoner's dilemma, peer-to-peer
        networking}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Jiang\%2C\%20Bai\%20\%26\%20Wang\%20-\%20Trust\%20and\%20Cooperation\%20in\%20Peer-to-Peer\%20Systems.pdf},
}
Jun:2005:IBI:1080192.1080199
@conference{Jun:2005:IBI:1080192.1080199,
  title = {Incentives in BitTorrent Induce Free Riding}, 
  author = {Jun, Seung and Ahamad, Mustaque}, 
  booktitle = {P2PECON'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of
        Peer-to-Peer Systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {August}, 
  address = {Philadelphia, Pennsylvania, USA}, 
  pages = {116--121}, 
  publisher = {ACM}, 
  series = {P2PECON '05}, 
  abstract = {We investigate the incentive mechanism of BitTorrent, which is a peer-to-peer
        file distribution system. As downloaders in BitTorrent are faced with the
        conflict between the eagerness to download and the unwillingness to upload, we
        relate this problem to the iterated prisoner's dilemma, which suggests guidelines
        to design a good incentive mechanism. Based on these guidelines, we propose a
        new, simple incentive mechanism. Our analysis and the experimental results using
        PlanetLab show that the original incentive mechanism of BitTorrent can induce
        free riding because it is not effective in rewarding and punishing downloaders
        properly. In contrast, a new mechanism proposed by us is shown to be more robust
        against free riders}, 
  www_section = {BitTorrent, data dissemination, prisoner's dilemma, strategy}, 
  isbn = {1-59593-026-4}, 
  doi = {http://doi.acm.org/10.1145/1080192.1080199}, 
  url = {http://doi.acm.org/10.1145/1080192.1080199}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PECON\%2705\%20-\%20Incentives\%20in\%20BitTorrent\%20induce\%20free\%20riding.pdf},
}
Junges:2008:EPD:1402298.1402308
@conference{Junges:2008:EPD:1402298.1402308,
  title = {Evaluating the performance of DCOP algorithms in a real world, dynamic problem}, 
  author = {Junges, Robert and Bazzan, Ana L. C.}, 
  booktitle = {AAMAS'08--Proceedings of the 7th international joint conference on Autonomous
        agents and multiagent systems}, 
  organization = {International Foundation for Autonomous Agents and Multiagent Systems}, 
  year = {2008}, 
  month = {May}, 
  address = {Estoril, Portugal}, 
  pages = {599--606}, 
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, 
  series = {AAMAS '08}, 
  abstract = {Complete algorithms have been proposed to solve problems modelled as
        distributed constraint optimization (DCOP). However, there are only few attempts
        to address real world scenarios using this formalism, mainly because of the
        complexity associated with those algorithms. In the present work we compare three
        complete algorithms for DCOP, aiming at studying how they perform in complex and
        dynamic scenarios of increasing sizes. In order to assess their performance we
        measure not only standard quantities such as number of cycles to arrive to a
        solution, size and quantity of exchanged messages, but also computing time and
        quality of the solution which is related to the particular domain we use. This
        study can shed light in the issues of how the algorithms perform when applied to
        problems other than those reported in the literature (graph coloring, meeting
        scheduling, and distributed sensor network)}, 
  www_section = {coordination, DCOP, distributed constraint optimization, traffic control}, 
  isbn = {978-0-9817381-1-6}, 
  url = {http://dl.acm.org/citation.cfm?id=1402298.1402308}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAMAS08\%20-\%20DCOP\%20algorithms\%20in\%20a\%20real\%20world\%20problem.pdf},
}
Kaafar:2007:SIC:1282427.1282388
@article{Kaafar:2007:SIC:1282427.1282388,
  title = {Securing Internet Coordinate Embedding Systems}, 
  author = {Kaafar, Mohamed Ali and Laurent Mathy and Barakat, Chadi and Salamatian, Kave
        and Turletti, Thierry and Dabbous, Walid}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {37}, 
  year = {2007}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {61--72}, 
  publisher = {ACM}, 
  abstract = {This paper addresses the issue of the security of Internet Coordinate
        Systems, by proposing a general method for malicious behavior detection during
        coordinate computations. We first show that the dynamics of a node, in a
        coordinate system without abnormal or malicious behavior, can be modeled by a
        Linear State Space model and tracked by a Kalman filter. Then we show, that the
        obtained model can be generalized in the sense that the parameters of a
        filter calibrated at a node can be used effectively to model and predict the
        dynamic behavior at another node, as long as the two nodes are not too far apart
        in the network. This leads to the proposal of a Surveyor infrastructure: Surveyor
        nodes are trusted, honest nodes that use each other exclusively to position
        themselves in the coordinate space, and are therefore immune to malicious
        behavior in the system. During their own coordinate embedding, other nodes can
        then use the filter parameters of a nearby Surveyor as a representation of normal,
        clean system behavior to detect and filter out abnormal or malicious activity. A
        combination of simulations and PlanetLab experiments are used to demonstrate the
        validity, generality, and effectiveness of the proposed approach for two
        representative coordinate embedding systems, namely Vivaldi and NPS}, 
  www_section = {internet coordinates-embedding systems, kalman filter, malicious behavior
        detection, network positioning systems, security}, 
  issn = {0146-4833}, 
  doi = {http://doi.acm.org/10.1145/1282427.1282388}, 
  url = {http://doi.acm.org/10.1145/1282427.1282388}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20Securing\%20Internet\%20Coordinate\%20Embedding\%20Systems.pdf},
}
Kamvar:2003:EAR:775152.775242
@conference{Kamvar:2003:EAR:775152.775242,
  title = {The EigenTrust algorithm for reputation management in P2P networks}, 
  author = {Kamvar, Sepandar D. and Schlosser, Mario T. and Hector Garcia-Molina}, 
  booktitle = {WWW'03. Proceedings of the 12th International Conference on World Wide Web}, 
  organization = {ACM}, 
  year = {2003}, 
  month = {May}, 
  address = {Budapest, Hungary}, 
  pages = {640--651}, 
  publisher = {ACM}, 
  series = {WWW '03}, 
  abstract = {Peer-to-peer file-sharing networks are currently receiving much attention as
        a means of sharing and distributing information. However, as recent experience
        shows, the anonymous, open nature of these networks offers an almost ideal
        environment for the spread of self-replicating inauthentic files. We describe an
        algorithm to decrease the number of downloads of inauthentic files in a
        peer-to-peer file-sharing network that assigns each peer a unique global trust
        value, based on the peer's history of uploads. We present a distributed and
        secure method to compute global trust values, based on Power iteration. By having
        peers use these global trust values to choose the peers from whom they download,
        the network effectively identifies malicious peers and isolates them from the
        network. In simulations, this reputation system, called EigenTrust, has been shown
        to significantly decrease the number of inauthentic files on the network, even
        under a variety of conditions where malicious peers cooperate in an attempt to
        deliberately subvert the system}, 
  www_section = {distributed eigenvector computation, peer-to-peer networking, reputation}, 
  isbn = {1-58113-680-3}, 
  doi = {http://doi.acm.org/10.1145/775152.775242}, 
  url = {http://doi.acm.org/10.1145/775152.775242}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WWW\%2703\%20-\%20The\%20EigenTrust\%20algorithm.pdf},
}
Karlof02securerouting
@conference{Karlof02securerouting,
  title = {Secure Routing in Wireless Sensor Networks: Attacks and Countermeasures}, 
  author = {Chris Karlof and David Wagner}, 
  booktitle = {First IEEE International Workshop on Sensor Network Protocols and
        Applications}, 
  year = {2002}, 
  pages = {113--127}, 
  abstract = {We consider routing security in wireless sensor networks. Many sensor network
        routing protocols have been proposed, but none of them have been designed with
        security as a goal. We propose security goals for routing in sensor networks,
        show how attacks against ad-hoc and peer-to-peer networks can be adapted into
        powerful attacks against sensor networks, introduce two classes of novel attacks
        against sensor networks --- sinkholes and HELLO floods, and analyze the security
        of all the major sensor network routing protocols. We describe crippling attacks
        against all of them and suggest countermeasures and design considerations. This
        is the first such analysis of secure routing in sensor networks}, 
  www_section = {ad-hoc networks, P2P, sensor networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.4672}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sensor-route-security_0.pdf},
}
Karnstedt2006SimilarityQueries
@conference{Karnstedt2006SimilarityQueries,
  title = {Similarity Queries on Structured Data in Structured Overlays}, 
  author = {Karnstedt, Marcel and Sattler, Kai-Uwe and Manfred Hauswirth and Roman
        Schmidt}, 
  booktitle = {Proceedings of the 22nd International Conference on Data Engineering
        Workshops}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  address = {Washington, DC, USA}, 
  pages = {0--32}, 
  publisher = {IEEE Computer Society}, 
  series = {ICDEW '06}, 
  isbn = {0-7695-2571-7}, 
  doi = {10.1109/ICDEW.2006.137}, 
  url = {http://dx.doi.org/10.1109/ICDEW.2006.137}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Karp2004/ALGO
@conference{Karp2004/ALGO,
  title = {Finite length analysis of LT codes}, 
  author = {Richard Karp and Luby, Michael and M. Amin Shokrollahi}, 
  booktitle = {Proceedings of the IEEE International Symposium on Information Theory, ISIT
        2004}, 
  year = {2004}, 
  month = {January}, 
  pages = {0--39}, 
  abstract = {This paper provides an efficient method for analyzing the error probability
        of the belief propagation (BP) decoder applied to LT Codes. Each output symbol is
        generated independently by sampling from a distribution and adding the input
        symbols corresponding to the support of the sampled vector}, 
  www_section = {algoweb_ldpc}, 
  isbn = {0-7695-1822-2}, 
  doi = {10.1109/ISIT.2004.1365074}, 
  url = {http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=1181950}, 
}
Kermarrec2013
@article{Kermarrec2013,
  title = {Towards a Personalized Internet: a Case for a Full Decentralization}, 
  author = {Kermarrec, Anne-Marie}, 
  journal = {Philosophical Transactions. Series A, Mathematical, Physical, and Engineering
        Sciences}, 
  volume = {371}, 
  number = {1987}, 
  year = {2013}, 
  month = {March}, 
  abstract = {The Web has become a user-centric platform where users post, share, annotate,
        comment and forward content be it text, videos, pictures, URLs, etc. This social
        dimension creates tremendous new opportunities for information exchange over the
        Internet, as exemplified by the surprising and exponential growth of social
        networks and collaborative platforms. Yet, niche content is sometimes difficult
        to retrieve using traditional search engines because they target the mass rather
        than the individual. Likewise, relieving users from useless notification is
        tricky in a world where there is so much information and so little of interest
        for each and every one of us. We argue that ultra-specific content could be
        retrieved and disseminated should search and notification be personalized to fit
        this new setting. We also argue that users' interests should be implicitly
        captured by the system rather than relying on explicit classifications simply
        because the world is by nature unstructured, dynamic and users do not want to be
        hampered in their actions by a tight and static framework. In this paper, we
        review some existing personalization approaches, most of which are centralized.
        We then advocate the need for fully decentralized systems because personalization
        raises two main issues. Firstly, personalization requires information to be
        stored and maintained at a user granularity which can significantly hurt the
        scalability of a centralized solution. Secondly, at a time when the
        {\textquoteleft}big brother is watching you' attitude is prominent, users may be
        more and more reluctant to give away their personal data to the few large
        companies that can afford such personalization. We start by showing how to
        achieve personalization in decentralized systems and conclude with the research
        agenda ahead}, 
  issn = {1364-503X}, 
  doi = {10.1098/rsta.2012.0380}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
Khorshadi:2005:DPR:1090948.1091369
@conference{Khorshadi:2005:DPR:1090948.1091369,
  title = {Determining the Peer Resource Contributions in a P2P Contract}, 
  author = {Khorshadi, Behrooz and Liu, Xin and Dipak Ghosal}, 
  booktitle = {HOT-P2P 2005. Proceedings of the Second International Workshop on Hot Topics
        in Peer-to-Peer Systems}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = {July}, 
  address = {La Jolla, California, USA}, 
  pages = {2--9}, 
  publisher = {IEEE Computer Society}, 
  abstract = {In this paper we study a scheme called P2P contract which explicitly
        specifies the resource contributions that are required from the peers. In
        particular, we consider a P2P file sharing system in which when a peer downloads
        the file it is required to serve the file to upto N other peers within a maximum
        period of time T. We study the behavior of this contribution scheme in both
        centralized and decentralized P2P networks. In a centralized architecture, new
        requests are forwarded to a central server which hands out the contract along
        with a list of peers from where the file can be downloaded. We show that a simple
        fixed contract (i.e., fixed values of N and T) is sufficient to create the
        required server capacity which adapts to the load. Furthermore, we show that T,
        the time part of the contract is a more important control parameter than N. In
        the case of a decentralized P2P architecture, each new request is broadcast to a
        certain neighborhood determined by the time-to-live (TTL) parameter. Each server
        receiving the request independently doles out a contract and the requesting peer
        chooses the one which is least constraining. If there are no servers in the
        neighborhood, the request fails. To achieve a good request success ratio, we
        propose an adaptive scheme to set the contracts without requiring global
        information. Through both analysis and simulation, we show that the proposed
        scheme adapts to the load and achieves low request failure rate with high server
        efficiency}, 
  www_section = {contracts, P2P, peer resource contribution, peer-to-peer networking}, 
  isbn = {0-7695-2417-6}, 
  doi = {10.1109/HOT-P2P.2005.9}, 
  url = {http://dl.acm.org/citation.cfm?id=1090948.1091369}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HOT-P2P\%2705\%20-\%20Khorshadi\%2C\%20Liu\%20\%26\%20Ghosal.pdf},
}
Kiran04totalrecall:
@conference{Kiran04totalrecall:,
  title = {Total Recall: System Support for Automated Availability Management}, 
  author = {Ranjita Bhagwan and Kiran Tati and Yu-chung Cheng and Stefan Savage and
        Geoffrey M. Voelker}, 
  booktitle = {In NSDI}, 
  year = {2004}, 
  pages = {337--350}, 
  abstract = {Availability is a storage system property that is both highly desired and yet
        minimally engineered. While many systems provide mechanisms to improve
        availability--such as redundancy and failure recovery--how to best configure
        these mechanisms is typically left to the system manager. Unfortunately, few
        individuals have the skills to properly manage the trade-offs involved, let alone
        the time to adapt these decisions to changing conditions. Instead, most systems
        are configured statically and with only a cursory understanding of how the
        configuration will impact overall performance or availability. While this issue
        can be problematic even for individual storage arrays, it becomes increasingly
        important as systems are distributed--and absolutely critical for the wide-area
        peer-to-peer storage infrastructures being explored. This paper describes the
        motivation, architecture and implementation for a new peer-to-peer storage
        system, called TotalRecall, that automates the task of availability management.
        In particular, the TotalRecall system automatically measures and estimates the
        availability of its constituent host components, predicts their future
        availability based on past behavior, calculates the appropriate redundancy
        mechanisms and repair policies, and delivers user-specified availability while
        maximizing efficiency}, 
  www_section = {P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.9775}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/recall.pdf}, 
}
Kleinberg:2004:NFD:982792.982803
@conference{Kleinberg:2004:NFD:982792.982803,
  title = {Network failure detection and graph connectivity}, 
  author = {Kleinberg, Jon and Sandler, Mark and Slivkins, Aleksandrs}, 
  booktitle = {SODA'04--Proceedings of the Fifteenth Annual ACM-SIAM Symposium on Discrete
        Algorithms}, 
  organization = {Society for Industrial and Applied Mathematics}, 
  year = {2004}, 
  month = {January}, 
  address = {New Orleans, Louisiana}, 
  pages = {76--85}, 
  publisher = {Society for Industrial and Applied Mathematics}, 
  series = {SODA '04}, 
  abstract = {We consider a model for monitoring the connectivity of a network subject to
        node or edge failures. In particular, we are concerned with detecting
        ({\epsilon}, k)-failures: events in which an adversary deletes up to network
        elements (nodes or edges), after which there are two sets of nodes A and B, each
        at least an {\epsilon} fraction of the network, that are disconnected from one
        another. We say that a set D of nodes is an ({\epsilon} k)-detection set if, for
        any ({\epsilon} k)-failure of the network, some two nodes in D are no longer able
        to communicate; in this way, D "witnesses" any such failure. Recent results show
        that for any graph G, there is an is ({\epsilon} k)-detection set of size bounded
        by a polynomial in k and {\epsilon}, independent of the size of G. In this paper,
        we expose some relationships between bounds on detection sets and the
        edge-connectivity {\lambda} and node-connectivity {\kappa} of the underlying
        graph. Specifically, we show that detection set bounds can be made considerably
        stronger when parameterized by these connectivity values. We show that for an
        adversary that can delete {\kappa}{\lambda} edges, there is always a detection
        set of size O(({\kappa}/{\epsilon}) log (1/{\epsilon})) which can be found by
        random sampling. Moreover, an ({\epsilon}, {\lambda})-detection set of minimum
        size (which is at most 1/{\epsilon}) can be computed in polynomial time. A
        crucial point is that these bounds are independent not just of the size of G but
        also of the value of {\lambda}. Extending these bounds to node failures is much
        more challenging. The most technically difficult result of this paper is that a
        random sample of O(({\kappa}/{\epsilon}) log (1/{\epsilon})) nodes is a detection
        set for adversaries that can delete a number of nodes up to {\kappa}, the
        node-connectivity. For the case of edge-failures we use VC-dimension techniques
        and the cactus representation of all minimum edge-cuts of a graph; for node
        failures, we develop a novel approach for working with the much more complex set
        of all minimum node-cuts of a graph}, 
  www_section = {failure detection, graph connectivity, network}, 
  isbn = {0-89871-558-X}, 
  url = {http://dl.acm.org/citation.cfm?id=982792.982803}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SODA\%2704\%20-\%20Network\%20failure\%20detection\%20and\%20graph\%20connectivity\%250A.pdf},
}
Klemm03aspecial-purpose
@conference{Klemm03aspecial-purpose,
  title = {A Special-Purpose Peer-to-Peer File Sharing System for Mobile Ad Hoc Networks}, 
  author = {Alexander Klemm and Christoph Lindemann and Oliver Waldhorst}, 
  booktitle = {A Special-Purpose Peer-to-Peer File Sharing System for Mobile Ad Hoc
        Networks}, 
  year = {2003}, 
  abstract = {Establishing peer-to-peer (P2P) file sharing for mobile ad hoc networks (MANET)
        requires the construction of a search algorithm for transmitting queries and
        search results as well as the development of a transfer protocol for downloading
        files matching a query. In this paper, we present a special-purpose system for
        searching and file transfer tailored to both the characteristics of MANET and the
        requirements of peer-to-peer file sharing. Our approach is based on an
        application layer overlay network. As innovative feature, overlay routes are set
        up on demand by the search algorithm, closely matching network topology and
        transparently aggregating redundant transfer paths on a per-file basis. The
        transfer protocol guarantees high data rates and low transmission overhead by
        utilizing overlay routes. In a detailed ns2 simulation study, we show that both
        the search algorithm and the transfer protocol outperform off-the-shelf
        approaches based on a P2P file sharing system for the wireline Internet, TCP and
        a MANET routing protocol}, 
  www_section = {ad-hoc networks, file-sharing, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.9634}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/VTC03.pdf}, 
}
Klinedinst_anew
@booklet{Klinedinst_anew,
  title = {A New Generation of File Sharing Tools}, 
  author = {Dan Klinedinst}, 
  year = {2003}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.4.1694\&rep=rep1\&type=pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Knoll:2009:BPS:1590968.1591829
@conference{Knoll:2009:BPS:1590968.1591829,
  title = {Bootstrapping Peer-to-Peer Systems Using IRC}, 
  author = {Knoll, Mirko and Helling, Matthias and Arno Wacker and Holzapfel, Sebastian and
        Weis, Torben}, 
  booktitle = {WETICE'09--Proceedings of the 18th IEEE International Workshops on Enabling
        Technologies: Infrastructures for Collaborative Enterprises}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  month = {June}, 
  address = {Groningen, The Netherlands}, 
  pages = {122--127}, 
  publisher = {IEEE Computer Society}, 
  series = {WETICE '09}, 
  abstract = {Research in the area of peer-to-peer systems is mainly focused on structuring
        the overlay network. Little attention is paid to the process of setting up and
        joining a peer-to-peer overlay network, i.e. the bootstrapping of peer-to-peer
        networks. The major challenge is to get hold of one peer that is already in the
        overlay. Otherwise, the first peer must be able to detect that the overlay is
        currently empty. Successful P2P applications either provide a centralized server
        for this task (Skype) or they simply put the burden on the user (eMule). We
        propose an automatic solution which does not require any user intervention and
        does not exhibit a single point of failure. Such decentralized bootstrapping
        protocols are especially important for open non-commercial peer-to-peer systems
        which cannot provide a server infrastructure for bootstrapping. The algorithm we
        are proposing builds on the Internet Relay Chat (IRC), a highly available,
        open, and distributed network of chat servers. Our algorithm is designed to put
        only a very minimal load on the IRC servers. In measurements we show that our
        bootstrapping protocol scales very well, handles flash crowds, and does only put
        a constant load on the IRC system disregarding of the peer-to-peer overlay size}, 
  www_section = {automated, bootstrapping, decentralized, efficient, IRC, P2P, peer-to-peer
        networking}, 
  isbn = {978-0-7695-3683-5}, 
  doi = {http://dx.doi.org/10.1109/WETICE.2009.40}, 
  url = {http://dx.doi.org/10.1109/WETICE.2009.40}, 
}
Koch:2010:EPL:1827418.1827440
@conference{Koch:2010:EPL:1827418.1827440,
  title = {Event processing for large-scale distributed games}, 
  author = {Gerald G. Koch and Tariq, Muhammad Adnan and Boris Koldehofe and Kurt
        Rothermel}, 
  booktitle = {Proceedings of the Fourth ACM International Conference on Distributed
        Event-Based Systems}, 
  organization = {ACM}, 
  year = {2010}, 
  address = {New York, NY, USA}, 
  pages = {103--104}, 
  publisher = {ACM}, 
  series = {DEBS '10}, 
  abstract = {Novel peer-to-peer-based multiplayer online games are instantiated in an
        ad-hoc manner without the support of dedicated infrastructure and maintain their
        state in a distributed manner. Although their employed communication paradigms
        provide efficient access to sections of distributed state, such communication
        fails if the participants need to access large subsets of the application state
        in order to detect high-level situations. We propose a demonstration that shows
        how multiplayer online games can benefit from using publish/subscribe
        communication and complex event processing alongside their traditional
        communication paradigm}, 
  www_section = {content-based publish/subscribe, distributed complex event processing,
        multi-player online game}, 
  isbn = {978-1-60558-927-5}, 
  doi = {http://doi.acm.org/10.1145/1827418.1827440}, 
  url = {http://doi.acm.org/10.1145/1827418.1827440}, 
}
KongHG07
@article{KongHG07,
  title = {An Identity-Free and On-Demand Routing Scheme against Anonymity Threats in
        Mobile Ad Hoc Networks}, 
  author = {Kong, Jiejun and Hong, Xiaoyan and Gerla, Mario}, 
  journal = {IEEE Transactions on Mobile Computing}, 
  year = {2007}, 
  volume = {6}, 
  number = {8}, 
  pages = {888--902}, 
  publisher = {IEEE Computer Society}, 
  address = {Los Alamitos, CA, USA}, 
  abstract = {Introducing node mobility into the network also introduces new anonymity
        threats. This important change of the concept of anonymity has recently attracted
        attentions in mobile wireless security research. This paper presents
        identity-free routing and on-demand routing as two design principles of anonymous
        routing in mobile ad hoc networks. We devise ANODR (ANonymous On-Demand Routing)
        as the needed anonymous routing scheme that is compliant with the design
        principles. Our security analysis and simulation study verify the effectiveness
        and efficiency of ANODR}, 
  www_section = {ad-hoc networks, anonymity, identity-free routing, neighborhood
        management, network complexity theory}, 
  issn = {1536-1233}, 
  doi = {10.1109/TMC.2007.1021}, 
  url = {http://portal.acm.org/citation.cfm?id=1272127}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/KongHG07.pdf}, 
}
Kostic:2003:BHB:945445.945473
@conference{Kostic:2003:BHB:945445.945473,
  title = {Bullet: High Bandwidth Data Dissemination Using an Overlay Mesh}, 
  author = {Kosti{\'c}, Dejan and Rodriguez, Adolfo and Albrecht, Jeannie and Vahdat,
        Amin}, 
  booktitle = {SOSP'03. Proceedings of the 19th ACM Symposium on Operating Systems
        Principles}, 
  organization = {ACM}, 
  year = {2003}, 
  month = {October}, 
  address = {Bolton Landing, NY, USA}, 
  pages = {282--297}, 
  publisher = {ACM}, 
  series = {SOSP '03}, 
  abstract = {In recent years, overlay networks have become an effective alternative to IP
        multicast for efficient point to multipoint communication across the Internet.
        Typically, nodes self-organize with the goal of forming an efficient overlay
        tree, one that meets performance targets without placing undue burden on the
        underlying network. In this paper, we target high-bandwidth data distribution
        from a single source to a large number of receivers. Applications include
        large-file transfers and real-time multimedia streaming. For these applications,
        we argue that an overlay mesh, rather than a tree, can deliver fundamentally
        higher bandwidth and reliability relative to typical tree structures. This paper
        presents Bullet, a scalable and distributed algorithm that enables nodes spread
        across the Internet to self-organize into a high bandwidth overlay mesh. We
        construct Bullet around the insight that data should be distributed in a disjoint
        manner to strategic points in the network. Individual Bullet receivers are then
        responsible for locating and retrieving the data from multiple points in
        parallel.Key contributions of this work include: i) an algorithm that sends data
        to different points in the overlay such that any data object is equally likely to
        appear at any node, ii) a scalable and decentralized algorithm that allows nodes
        to locate and recover missing data items, and iii) a complete implementation and
        evaluation of Bullet running across the Internet and in a large-scale emulation
        environment reveals up to a factor two bandwidth improvements under a variety of
        circumstances. In addition, we find that, relative to tree-based solutions,
        Bullet reduces the need to perform expensive bandwidth probing. In a tree, it is
        critical that a node's parent delivers a high rate of application data to each
        child. In Bullet however, nodes simultaneously receive data from multiple sources
        in parallel, making it less important to locate any single source capable of
        sustaining a high transmission rate}, 
  www_section = {BANDWIDTH, bullet, overlays, peer-to-peer networking}, 
  isbn = {1-58113-757-5}, 
  doi = {10.1145/945445.945473}, 
  url = {http://doi.acm.org/10.1145/945445.945473}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SOSP\%2703\%20-\%20Bullet.pdf},
}
Kostoulas:2005:DSS:1097873.1098292
@conference{Kostoulas:2005:DSS:1097873.1098292,
  title = {Decentralized Schemes for Size Estimation in Large and Dynamic Groups}, 
  author = {Kostoulas, Dionysios and Psaltoulis, Dimitrios and Gupta, Indranil and Birman,
        Kenneth P. and Demers, Alan}, 
  booktitle = {NCA'05--Proceedings of the 4th IEEE International Symposium on Network
        Computing and Applications}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = {July}, 
  address = {Cambridge, MA, USA}, 
  pages = {41--48}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Large-scale and dynamically changing distributed systems such as the Grid,
        peer-to-peer overlays, etc., need to collect several kinds of global statistics
        in a decentralized manner. In this paper, we tackle a specific statistic
        collection problem called Group Size Estimation, for estimating the number of
        non-faulty processes present in the global group at any given point of time. We
        present two new decentralized algorithms for estimation in dynamic groups,
        analyze the algorithms, and experimentally evaluate them using real-life traces.
        One scheme is active: it spreads a gossip into the overlay first, and then
        samples the receipt times of this gossip at different processes. The second
        scheme is passive: it measures the density of processes when their identifiers
        are hashed into a real interval. Both schemes have low latency, scalable
        perprocess overheads, and provide high levels of probabilistic accuracy for the
        estimate. They are implemented as part of a size estimation utility called
        PeerCounter that can be incorporated modularly into standard peer-to-peer
        overlays. We present experimental results from both the simulations and
        PeerCounter, running on a cluster of 33 Linux servers}, 
  www_section = {decentralized, distributed systems, network size estimation}, 
  isbn = {0-7695-2326-9}, 
  doi = {10.1109/NCA.2005.15}, 
  url = {http://dl.acm.org/citation.cfm?id=1097873.1098292}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NCA\%2705\%20-\%20Decentralized\%20Schemes\%20for\%20Size\%20Estimation\%20in\%20Large\%20and\%20Dynamic\%20Groups.pdf},
}
Kuegler03ananalysis
@conference{Kuegler03ananalysis,
  title = {An Analysis of GNUnet and the Implications for Anonymous, Censorship-Resistant
        Networks}, 
  author = {K{\"u}gler, Dennis}, 
  booktitle = {Proceedings of the 3rd International Workshop on Privacy Enhancing
        Technologies (PET 2003)}, 
  organization = {Springer-Verlag}, 
  publisher = {Springer-Verlag}, 
  year = {2003}, 
  month = {January}, 
  pages = {161--176}, 
  www_section = {anonymity, GNUnet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/GNUnet_pet.pdf}, 
}
Kumar2006Algorithms
@article{Kumar2006Algorithms,
  title = {Algorithms to accelerate multiple regular expressions matching for deep packet
        inspection}, 
  author = {Kumar, Sailesh and Dharmapurikar, Sarang and Yu, Fang and Crowley, Patrick and
        Turner, Jonathan}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {36}, 
  number = {4}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {339--350}, 
  publisher = {ACM}, 
  www_section = {deep packet inspection, DFA, regular expressions}, 
  issn = {0146-4833}, 
  doi = {10.1145/1151659.1159952}, 
  url = {http://doi.acm.org/10.1145/1151659.1159952}, 
}
Kumar2006Algorithms_0
@article{Kumar2006Algorithms_0,
  title = {Algorithms to accelerate multiple regular expressions matching for deep packet
        inspection}, 
  author = {Kumar, Sailesh and Dharmapurikar, Sarang and Yu, Fang and Crowley, Patrick and
        Turner, Jonathan}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {36}, 
  number = {4}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {339--350}, 
  publisher = {ACM}, 
  www_section = {deep packet inspection, DFA, regular expressions}, 
  issn = {0146-4833}, 
  doi = {10.1145/1151659.1159952}, 
  url = {http://doi.acm.org/10.1145/1151659.1159952}, 
  internal-note = {exact duplicate of entry Kumar2006Algorithms; consider removing one key}, 
}
Kwon:2003:EPF:827273.829221
@conference{Kwon:2003:EPF:827273.829221,
  title = {An Efficient Peer-to-Peer File Sharing Exploiting Hierarchy and Asymmetry}, 
  author = {Kwon, Gisik and Ryu, Kyung D.}, 
  booktitle = {SAINT'03. Proceedings of the 2003 Symposium on Applications and the
        Internet}, 
  organization = {IEEE Computer Society}, 
  year = {2003}, 
  month = {January}, 
  address = {Orlando, Florida, USA}, 
  pages = {0--226}, 
  publisher = {IEEE Computer Society}, 
  series = {SAINT '03}, 
  abstract = {Many Peer-to-Peer (P2P) file sharing systems have been proposed to take
        advantage of high scalability and abundant resources at end-user machines.
        Previous approaches adopted either simple flooding or routing with complex
        structures, such as Distributed HashingTables (DHT). However, these approaches
        did not consider the heterogeneous nature of the machines and the hierarchy of
        networks on the Internet. This paper presents Peer-to-peer Asymmetric file
        Sharing System(PASS), a novel approach to P2P file sharing, which accounts for
        the different capabilities and network locations of the participating machines.
        Our system selects only a portion of high-capacity machines(supernodes) for
        routing support, and organizes the network by using location information. We show
        that our key-coverage based directory replication improves the file search
        performance to a small constant number of routing hops, regardless of the network
        size}, 
  www_section = {asymmetry, hierarchy, P2P, pass, peer-to-peer asymmetric file sharing
        system, peer-to-peer networking}, 
  isbn = {0-7695-1872-9}, 
  doi = {10.1109/SAINT.2003.1183054}, 
  url = {http://dl.acm.org/citation.cfm?id=827273.829221}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SAINT\%2703\%20-\%20Kwon\%20\%26\%20Ryu.pdf},
}
LEBLOND:2011:INRIA-00574178:1
@conference{LEBLOND:2011:INRIA-00574178:1,
  title = {One Bad Apple Spoils the Bunch: Exploiting P2P Applications to Trace and Profile
        Tor Users}, 
  author = {Le Blond, Stevens and Manils, Pere and Abdelberi, Chaabane and Kaafar, Mohamed
        Ali and Castelluccia, Claude and Legout, Arnaud and Dabbous, Walid}, 
  booktitle = {4th USENIX Workshop on Large-Scale Exploits and Emergent Threats (LEET
        '11)}, 
  organization = {USENIX}, 
  year = {2011}, 
  month = {March}, 
  address = {Boston, United States}, 
  publisher = {USENIX}, 
  abstract = {Tor is a popular low-latency anonymity network. However, Tor does not protect
        against the exploitation of an insecure application to reveal the IP address of,
        or trace, a TCP stream. In addition, because of the linkability of Tor streams
        sent together over a single circuit, tracing one stream sent over a circuit
        traces them all. Surprisingly, it is unknown whether this linkability allows in
        practice to trace a significant number of streams originating from secure (i.e.,
        proxied) applications. In this paper, we show that linkability allows us to trace
        193\% of additional streams, including 27\% of HTTP streams possibly originating
        from {\textquoteleft}{\textquoteleft}secure'' browsers. In particular, we traced
        9\% of Tor streams carried by our instrumented exit nodes. Using BitTorrent as
        the insecure application, we design two attacks tracing BitTorrent users on Tor.
        We run these attacks in the wild for 23 days and reveal 10,000 IP addresses of
        Tor users. Using these IP addresses, we then profile not only the BitTorrent
        downloads but also the websites visited per country of origin of Tor users. We
        show that BitTorrent users on Tor are over-represented in some countries as
        compared to BitTorrent users outside of Tor. By analyzing the type of content
        downloaded, we then explain the observed behaviors by the higher concentration of
        pornographic content downloaded at the scale of a country. Finally, we present
        results suggesting the existence of an underground BitTorrent ecosystem on Tor}, 
  www_section = {anonymity, Tor}, 
  url = {http://hal.inria.fr/inria-00574178/PDF/btor.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/btor.pdf}, 
}
LOCEntropy2008
@booklet{LOCEntropy2008,
  title = {Entropy Bounds for Traffic Confirmation}, 
  author = {O'Connor, Luke}, 
  number = {2008/365}, 
  publisher = {IACR}, 
  year = {2008}, 
  month = {October}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LOCEntropy2008.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Lai03incentivesfor
@conference{Lai03incentivesfor,
  title = {Incentives for Cooperation in Peer-to-Peer Networks}, 
  author = {Lai, Kevin and Feldman, Michal and Stoica, Ion and Chuang, John}, 
  booktitle = {P2PECON. Proceedings of the First Workshop on Economics of Peer-to-Peer
        Systems}, 
  year = {2003}, 
  month = {June}, 
  address = {Berkeley, California, USA}, 
  abstract = {this paper, our contributions are to generalize from the traditional
        symmetric EPD to the asymmetric transactions of P2P applications, map out the
        design space of EPD-based incentive techniques, and simulate a subset of these
        techniques. Our findings are as follows: Incentive techniques relying on private
        history (where entites only use their private histories of entities' actions)
        fail as the population size increases}, 
  www_section = {P2P, privacy}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.1949}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incentives-for-cooperation-in_0.pdf},
}
Landsiedel_dynamicmultipath
@booklet{Landsiedel_dynamicmultipath,
  title = {Dynamic Multipath Onion Routing in Anonymous Peer-To-Peer Overlay Networks}, 
  author = {Landsiedel, Olaf and Pimenidis, Alexis and Wehrle, Klaus}, 
  year = {2007}, 
  abstract = {Although recent years provided many protocols for anonymous routing in
        overlay networks, they commonly rely on the same communication paradigm: Onion
        Routing. In Onion Routing a static tunnel through an overlay network is build via
        layered encryption. All traffic exchanged by its end points is relayed through
        this tunnel. In contrast, this paper introduces dynamic multipath Onion Routing
        to extend the static Onion Routing paradigm. This approach allows each packet
        exchanged between two end points to travel along a different path. To provide
        anonymity the first half of this path is selected by the sender and the second
        half by the receiver of the packet. The results are manifold: First, dynamic
        multipath Onion Routing increases the resilience against threats, especially
        pattern and timing based analysis attacks. Second, the dynamic paths reduce the
        impact of misbehaving and overloaded relays. Finally, inspired by Internet
        routing, the forwarding nodes do not need to maintain any state about ongoing
        flows and so reduce the complexity of the router. In this paper, we describe the
        design of our dynamic Multipath Onion RoutEr (MORE) for peer-to-peer overlay
        networks, and evaluate its performance. Furthermore, we integrate address
        virtualization to abstract from Internet addresses and provide transparent
        support for IP applications. Thus, no application-level gateways, proxies or
        modifications of applications are required to sanitize protocols from network
        level information. Acting as an IP-datagram service, our scheme provides a
        substrate for anonymous communication to a wide range of applications using TCP
        and UDP}, 
  www_section = {onion routing, overlay networks, P2P}, 
  isbn = {978-1-4244-1043-9}, 
  url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F4410909\%2F4410910\%2F04410930.pdf\%3Farnumber\%3D4410930\&authDecision=-203},
}
Lee2007CISS
@article{Lee2007CISS,
  title = {CISS: An efficient object clustering framework for DHT-based peer-to-peer
        applications}, 
  author = {Lee, Jinwon and Lee, Hyonik and Kang, Seungwoo and Kim, Su Myeon and Song,
        Junehwa}, 
  journal = {Computer Networks}, 
  volume = {51}, 
  number = {4}, 
  year = {2007}, 
  address = {New York, NY, USA}, 
  pages = {1072--1094}, 
  publisher = {Elsevier North-Holland, Inc}, 
  www_section = {distributed hash table, load balancing, Multi-dimensional range query,
        Object clustering, Peer-to-peer application}, 
  issn = {1389-1286}, 
  doi = {10.1016/j.comnet.2006.07.005}, 
  url = {http://dx.doi.org/10.1016/j.comnet.2006.07.005}, 
}
Leibowitz:2003:DKN:832311.837393
@conference{Leibowitz:2003:DKN:832311.837393,
  title = {Deconstructing the Kazaa Network}, 
  author = {Leibowitz, Nathaniel and Ripeanu, Matei and Wierzbicki, Adam}, 
  booktitle = {WIAPP'03--Proceedings of the Third IEEE Workshop on Internet
        Applications}, 
  organization = {IEEE Computer Society}, 
  year = {2003}, 
  month = {June}, 
  address = {San Jos{\'e}, CA, USA}, 
  pages = {0--112}, 
  publisher = {IEEE Computer Society}, 
  series = {WIAPP '03}, 
  abstract = {Internet traffic is experiencing a shift from web traffic to file swapping
        traffic. Today a significant part of Internet traffic is generated by peer-to-peer
        applications, mostly by the popular Kazaa application. Yet, to date, few studies
        analyze Kazaa traffic, thus leaving the bulk of Internet traffic in dark. We
        present a large-scale investigation of Kazaa traffic based on logs collected at a
        large Israeli ISP, which capture roughly a quarter of all traffic between Israel
        and US}, 
  www_section = {file swapping traffic, kazaa, traffic}, 
  isbn = {0-7695-1972-5}, 
  url = {http://dl.acm.org/citation.cfm?id=832311.837393}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WIAPP\%2703\%20-\%20Deconstructing\%20the\%20Kazaa\%20network.pdf},
}
Levien04attackresistant
@booklet{Levien04attackresistant,
  title = {Attack Resistant Trust Metrics}, 
  author = {Levien, Raph}, 
  year = {2004}, 
  abstract = {This dissertation characterizes the space of trust metrics, under both the
        scalar assumption where each assertion is evaluated independently, and the group
        assumption where a group of assertions are evaluated in tandem. We present a
        quantitative framework for evaluating the attack resistance of trust metrics, and
        give examples of trust metrics that are within a small factor of optimum compared
        to theoretical upper bounds. We discuss experiences with a realworld deployment
        of a group trust metric, the Advogato website. Finally, we explore possible
        applications of attack resistant trust metrics, including using it as to build a
        distributed name server, verifying metadata in peer-to-peer networks such as
        music sharing systems, and a proposal for highly spam resistant e-mail delivery}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.83.9266}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/compact.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Levin:2008:BAA:1402946.1402987
@article{Levin:2008:BAA:1402946.1402987,
  title = {BitTorrent is an Auction: Analyzing and Improving BitTorrent's Incentives}, 
  author = {Levin, Dave and LaCurts, Katrina and Spring, Neil and Bobby Bhattacharjee}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {38}, 
  year = {2008}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {243--254}, 
  publisher = {ACM}, 
  abstract = {Incentives play a crucial role in BitTorrent, motivating users to upload to
        others to achieve fast download times for all peers. Though long believed to be
        robust to strategic manipulation, recent work has empirically shown that
        BitTorrent does not provide its users incentive to follow the protocol. We
        propose an auction-based model to study and improve upon BitTorrent's incentives.
        The insight behind our model is that BitTorrent uses, not tit-for-tat as widely
        believed, but an auction to decide which peers to serve. Our model not only
        captures known, performance-improving strategies, it shapes our thinking toward
        new, effective strategies. For example, our analysis demonstrates,
        counter-intuitively, that BitTorrent peers have incentive to intelligently
        under-report what pieces of the file they have to their neighbors. We implement
        and evaluate a modification to BitTorrent in which peers reward one another with
        proportional shares of bandwidth. Within our game-theoretic model, we prove that
        a proportional-share client is strategy-proof. With experiments on PlanetLab, a
        local cluster, and live downloads, we show that a proportional-share unchoker
        yields faster downloads against BitTorrent and BitTyrant clients, and that
        under-reporting pieces yields prolonged neighbor interest}, 
  www_section = {auctions, BitTorrent, proportional share, tit-for-tat}, 
  issn = {0146-4833}, 
  doi = {10.1145/1402946.1402987}, 
  url = {http://doi.acm.org/10.1145/1402946.1402987}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20BitTorrent\%20is\%20an\%20Auction.pdf},
}
Levine:2002
@article{Levine:2002,
  title = {Hordes --- A Multicast Based Protocol for Anonymity}, 
  author = {Levine, Brian Neil and Shields, Clay}, 
  journal = {Journal of Computer Security}, 
  year = {2002}, 
  volume = {10}, 
  number = {3}, 
  pages = {213--240}, 
  abstract = {With widespread acceptance of the Internet as a public medium for
        communication and information retrieval, there has been rising concern that the
        personal privacy of users can be eroded by cooperating network entities. A
        technical solution to maintaining privacy is to provide anonymity. We present a
        protocol for initiator anonymity called Hordes, which uses forwarding mechanisms
        similar to those used in previous protocols for sending data, but is the first
        protocol to make use of multicast routing to anonymously receive data. We show
        this results in shorter transmission latencies and requires less work of the
        protocol participants, in terms of the messages processed. We also present a
        comparison of the security and anonymity of Hordes with previous protocols, using
        the first quantitative definition of anonymity and unlinkability}, 
  www_section = {anonymity, Hordes, multicast, routing}, 
  issn = {0926-227X}, 
  url = {http://portal.acm.org/citation.cfm?id=603406}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Levine-2002.pdf}, 
}
Levine:2006
@techreport{Levine:2006,
  title = {A Survey of Solutions to the Sybil Attack}, 
  author = {Brian Neil Levine and Clay Shields and Margolin, N. Boris}, 
  institution = {University of Massachusetts Amherst}, 
  number = {2006-052}, 
  year = {2006}, 
  month = {October}, 
  address = {Amherst, MA}, 
  type = {Tech report}, 
  abstract = {Many security mechanisms are based on specific assumptions of identity and
        are vulnerable to attacks when these assumptions are violated. For example,
        impersonation is the well-known consequence when authenticating credentials are
        stolen by a third party. Another attack on identity occurs when credentials for
        one identity are purposely shared by multiple individuals, for example to avoid
        paying twice for a service. In this paper, we survey the impact of the Sybil
        attack, an attack against identity in which an individual entity masquerades as
        multiple simultaneous identities. The Sybil attack is a fundamental problem in
        many systems, and it has so far resisted a universally applicable solution}, 
  www_section = {anonymity, security, Sybil attack}, 
  url = {http://prisms.cs.umass.edu/brian/pubs/levine.sybil.tr.2006.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20Report\%20-\%20A\%20Survey\%20of\%20Solutions\%20to\%20the\%20Sybil\%20Attack.pdf},
}
Li2007-tcloseness
@conference{Li2007-tcloseness,
  title = {t-Closeness: Privacy Beyond k-Anonymity and $\ell$-Diversity}, 
  author = {Ninghui Li and Tiancheng Li and Suresh Venkatasubramanian}, 
  booktitle = {Proceedings of the 23rd IEEE International Conference on Data Engineering
        (ICDE 2007)}, 
  year = {2007}, 
  pages = {106--115}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Li:2003:MRQ:958491.958500
@conference{Li:2003:MRQ:958491.958500,
  title = {Multi-dimensional range queries in sensor networks}, 
  author = {Li, Xin and Kim, Young Jin and Govindan, Ramesh and Hong, Wei}, 
  booktitle = {Proceedings of the 1st International Conference on Embedded Networked Sensor
        Systems}, 
  organization = {ACM}, 
  year = {2003}, 
  address = {New York, NY, USA}, 
  pages = {63--75}, 
  publisher = {ACM}, 
  series = {SenSys '03}, 
  www_section = {distributed hash table, multi-dimensional range queries, range queries}, 
  isbn = {1-58113-707-9}, 
  doi = {10.1145/958491.958500}, 
  url = {http://doi.acm.org/10.1145/958491.958500}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/eScholarship\%20UC\%20item\%204x6723n2.pdf},
}
Liberatore:2006
@conference{Liberatore:2006,
  title = {Inferring the Source of Encrypted HTTP Connections}, 
  author = {Marc Liberatore and Brian Neil Levine}, 
  booktitle = {Proceedings of the 13th ACM conference on Computer and Communications
        Security (CCS 2006)}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {October}, 
  address = {New York, NY, USA}, 
  pages = {255--263}, 
  publisher = {ACM}, 
  abstract = {We examine the effectiveness of two traffic analysis techniques for
        identifying encrypted HTTP streams. The techniques are based upon classification
        algorithms, identifying encrypted traffic on the basis of similarities to
        features in a library of known profiles. We show that these profiles need not be
        collected immediately before the encrypted stream; these methods can be used to
        identify traffic observed both well before and well after the library is created.
        We give evidence that these techniques will exhibit the scalability necessary to
        be effective on the Internet. We examine several methods of actively countering
        the techniques, and we find that such countermeasures are effective, but at a
        significant increase in the size of the traffic stream. Our claims are
        substantiated by experiments and simulation on over 400,000 traffic streams we
        collected from 2,000 distinct web sites during a two month period}, 
  www_section = {latency, network forensics, traffic analysis}, 
  isbn = {1-59593-518-5}, 
  doi = {10.1145/1180405.1180437}, 
  url = {http://portal.acm.org/citation.cfm?id=1180437}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Liberatore-2006.pdf}, 
}
Liedtke93apersistent
@booklet{Liedtke93apersistent,
  title = {A Persistent System in Real Use--Experiences of the First 13 Years}, 
  author = {Liedtke, Jochen}, 
  year = {1993}, 
  abstract = {Eumel and its advanced successor L3 are operating systems built by GMD which
        have been used, for 13 years and 4 years respectively, as production systems in
        business and education. More than 2000 Eumel systems and 500 L3 systems have been
        shipped since 1979 and 1988. Both systems rely heavily on the paradigm of
        persistence (including fault-surviving persistence). Both data and processes, in
        principle all objects are persistent, files are implemented by means of
        persistent objects (not vice versa) etc. In addition to the principles and
        mechanisms of Eumel /L3, general and specific experiences are described: these
        relate to the design, implementation and maintenance of the systems over the last
        13 years. For general purpose timesharing systems the idea is powerful and
        elegant, it can be efficiently implemented, but making a system really usable is
        hard work}, 
  www_section = {Unsorted}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.53.7112}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.53.7112.pdf}, 
}
Locher06freeriding
@conference{Locher06freeriding,
  title = {Free Riding in BitTorrent is Cheap}, 
  author = {Thomas Locher and Patrick Moor and Stefan Schmid and Roger Wattenhofer}, 
  booktitle = {Proceedings of the Fifth Workshop on Hot Topics in Networks (HotNets-V)}, 
  year = {2006}, 
  abstract = {While it is well-known that BitTorrent is vulnerable to selfish behavior,
        this paper demonstrates that even entire files can be downloaded without
        reciprocating at all in BitTorrent. To this end, we present BitThief, a free
        riding client that never contributes any real data. First, we show that simple
        tricks suffice in order to achieve high download rates, even in the absence of
        seeders. We also illustrate how peers in a swarm react to various sophisticated
        attacks. Moreover, our analysis reveals that sharing
        communities{\textemdash}communities originally intended to offer downloads of
        good quality and to promote cooperation among peers{\textemdash}provide many
        incentives to cheat}, 
  www_section = {BitTorrent}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.67.9307}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.67.9307.pdf}, 
}
Locher:2010:PKN:2018057.2018085
@conference{Locher:2010:PKN:2018057.2018085,
  title = {Poisoning the Kad network}, 
  author = {Thomas Locher and Mysicka, David and Stefan Schmid and Roger Wattenhofer}, 
  booktitle = {ICDCN'10--Proceedings of the 11th International Conference on Distributed
        Computing and Networking}, 
  organization = {Springer-Verlag}, 
  year = {2010}, 
  month = {January}, 
  address = {Kolkata, India}, 
  pages = {195--206}, 
  publisher = {Springer-Verlag}, 
  series = {ICDCN'10}, 
  abstract = {Since the demise of the Overnet network, the Kad network has become not only
        the most popular but also the only widely used peer-to-peer system based on a
        distributed hash table. It is likely that its user base will continue to grow in
        numbers over the next few years as, unlike the eDonkey network, it does not
        depend on central servers, which increases scalability and reliability. Moreover,
        the Kad network is more efficient than unstructured systems such as Gnutella.
        However, we show that today's Kad network can be attacked in several ways by
        carrying out several (well-known) attacks on the Kad network. The presented
        attacks could be used either to hamper the correct functioning of the network
        itself, to censor contents, or to harm other entities in the Internet not
        participating in the Kad network such as ordinary web servers. While there are
        simple heuristics to reduce the impact of some of the attacks, we believe that
        the presented attacks cannot be thwarted easily in any fully decentralized
        peer-to-peer system without some kind of a centralized certification and
        verification authority}, 
  www_section = {distributed hash table, KAD}, 
  isbn = {3-642-11321-4, 978-3-642-11321-5}, 
  doi = {10.1007/978-3-642-11322-2_22}, 
  url = {http://dl.acm.org/citation.cfm?id=2018057.2018085}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICDCN\%2710\%20-\%20Poisoning\%20the\%20Kad\%20Network.pdf},
}
Loo03peer-to-peerbackup
% NOTE(review): the original record listed "Boon Thau Loo" twice in the author
% field; the duplicate has been removed.
@booklet{Loo03peer-to-peerbackup,
  title = {Peer-To-Peer Backup for Personal Area Networks}, 
  author = {Boon Thau Loo and Anthony LaMarca and Gaetano Borriello}, 
  year = {2003}, 
  abstract = {FlashBack is a peer-to-peer backup algorithm designed for powerconstrained
        devices running in a personal area network (PAN). Backups are performed
        transparently as local updates initiate the spread of backup data among a subset
        of the currently available peers. Flashback limits power usage by avoiding
        flooding and keeping small neighbor sets. Flashback has also been designed to
        utilize powered infrastructure when possible to further extend device lifetime.
        We propose our architecture and algorithms, and present initial experimental
        results that illustrate FlashBack's performance characteristics}, 
  www_section = {backup, P2P, personal area network}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.7820}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/052820031647_102.pdf}, 
}
Lpcox03samsara:honor
% NOTE(review): the abstract was garbled ("... peer-to-peer systems. requiring
% trusted third parties ..."); restored the missing "Samsara enforces fairness
% ... without" from the published abstract -- confirm against the paper.
@conference{Lpcox03samsara:honor,
  title = {Samsara: Honor Among Thieves in Peer-to-Peer Storage}, 
  author = {Landon P. Cox and Brian D. Noble}, 
  booktitle = {SOSP'03--Proceedings of the Nineteenth ACM Symposium on Operating Systems
        Principles}, 
  organization = {ACM Press}, 
  year = {2003}, 
  month = {October}, 
  address = {Bolton Landing, NY, USA}, 
  pages = {120--132}, 
  publisher = {ACM Press}, 
  abstract = {Peer-to-peer storage systems assume that their users consume resources in
        proportion to their contribution. Unfortunately, users are unlikely to do this
        without some enforcement mechanism. Prior solutions to this problem require
        centralized infrastructure, constraints on data placement, or ongoing
        administrative costs. All of these run counter to the design philosophy of
        peer-to-peer systems. Samsara enforces fairness in peer-to-peer storage systems
        without requiring trusted third parties, symmetric storage relationships,
        monetary payment, or certified identities. Each peer that requests
        storage of another must agree to hold a claim in return---a placeholder that
        accounts for available space. After an exchange, each partner checks the other to
        ensure faithfulness. Samsara punishes unresponsive nodes probabilistically.
        Because objects are replicated, nodes with transient failures are unlikely to
        suffer data loss, unlike those that are dishonest or chronically unavailable.
        Claim storage overhead can be reduced when necessary by forwarding among chains
        of nodes, and eliminated when cycles are created. Forwarding chains increase the
        risk of exposure to failure, but such risk is modest under reasonable assumptions
        of utilization and simultaneous, persistent failure}, 
  www_section = {P2P, reputation}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.6734}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p135-cox.pdf}, 
}
LuFSG05
% NOTE(review): the record carried "ERROR: Missing field" markers for
% www_section; filled with topical keywords based on the title/abstract
% (anonymous tunnel, P2P environments) -- adjust to the site's taxonomy if needed.
@conference{LuFSG05,
  title = {Some Remarks on Universal Re-encryption and A Novel Practical Anonymous Tunnel}, 
  author = {Tianbo Lu and Bin-Xing Fang and Yuzhong Sun and Li Guo}, 
  booktitle = {Proceedings of ICCNMC}, 
  year = {2005}, 
  pages = {853--862}, 
  abstract = {In 2004 Golle, Jakobsson, Juels and Syverson presented a new encryption
        scheme called the universal re-encryption [GJJS04] for mixnets [Cha81] which was
        extended by Gomulkiewicz et al. [GKK04]. We discover that this scheme and its
        extension both are insecure against a chosen ciphertext attack proposed by
        Pfitzmann in 1994 [Pfi94]. Another drawback of them is low efficiency for
        anonymous communications due to their long ciphertexts, i.e., four times the size
        of plaintext. Accordingly, we devise a novel universal and efficient anonymous
        tunnel, rWonGoo, for circuit-based low-latency communications in large scale
        peer-to-peer environments to dramatically decrease possibility to suffer from the
        attack [Pfi94]. The basic idea behind rWonGoo is to provide anonymity with
        re-encryption and random forwarding, obtaining practicality, correctness and
        efficiency in encryption in the way differing from the layered encryption systems
        [Cha81] that can be difficult to achieve correctness of tunnels}, 
  www_section = {anonymity, P2P}, 
  isbn = {978-3-540-28102-3}, 
  doi = {10.1007/11534310}, 
  url = {http://www.springerlink.com/content/b3x4na87xbmcextx/}, 
}
Lua05asurvey
% NOTE(review): added the issue number (IEEE Communications Surveys and
% Tutorials 7(2), Second Quarter 2005, pp. 72--93) -- confirm against the
% publisher record.
@article{Lua05asurvey,
  title = {A Survey and Comparison of Peer-to-Peer Overlay Network Schemes}, 
  author = {Eng Keong Lua and Jon Crowcroft and Marcelo Pias and Ravi Sharma and Steven
        Lim}, 
  journal = {IEEE Communications Surveys and Tutorials}, 
  volume = {7}, 
  number = {2}, 
  year = {2005}, 
  pages = {72--93}, 
  abstract = {Over the Internet today, computing and communications environments are
        significantly more complex and chaotic than classical distributed systems,
        lacking any centralized organization or hierarchical control. There has been much
        interest in emerging Peer-to-Peer (P2P) network overlays because they provide a
        good substrate for creating large-scale data sharing, content distribution and
        application-level multicast applications. These P2P networks try to provide a
        long list of features such as: selection of nearby peers, redundant storage,
        efficient search/location of data items, data permanence or guarantees,
        hierarchical naming, trust and authentication, and, anonymity. P2P networks
        potentially offer an efficient routing architecture that is self-organizing,
        massively scalable, and robust in the wide-area, combining fault tolerance, load
        balancing and explicit notion of locality. In this paper, we present a survey and
        comparison of various Structured and Unstructured P2P networks. We categorize the
        various schemes into these two groups in the design spectrum and discuss the
        application-level network performance of each group}, 
  www_section = {overlay, peer-to-peer networking}, 
  issn = {1553-877X}, 
  doi = {10.1109/COMST.2005.1610546}, 
  url = {http://www.slideshare.net/networkingcentral/a-survey-and-comparison-of-peertopeer-overlay-network-schemes},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20CST\%20-\%20A\%20Survey\%20and\%20Comparison\%20of\%20Peer-to-Peer\%20Overlay.pdf},
}
Luby01efficienterasure
% NOTE(review): removed the spurious booktitle field that merely duplicated the
% title (booktitle is not meaningful in an @article and is typical auto-export
% junk); added the journal ISSN, matching the sibling entry for the same
% journal/volume (Luby01improvedlow-density).
@article{Luby01efficienterasure,
  title = {Efficient erasure correcting codes}, 
  author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A.
        Spielman}, 
  journal = {IEEE Transactions on Information Theory}, 
  volume = {47}, 
  year = {2001}, 
  pages = {569--584}, 
  abstract = {We introduce a simple erasure recovery algorithm for codes derived from
        cascades of sparse bipartite graphs and analyze the algorithm by analyzing a
        corresponding discrete-time random process. As a result, we obtain a simple
        criterion involving the fractions of nodes of different degrees on both sides of
        the graph which is necessary and sufficient for the decoding process to finish
        successfully with high probability. By carefully designing these graphs we can
        construct for any given rate R and any given real number {\epsilon} a family of
        linear codes of rate R which can be encoded in time proportional to
        ln(1/{\epsilon}) times their block length n. Furthermore, a codeword can be
        recovered with high probability from a portion of its entries of length
        (1+{\epsilon})Rn or more. The recovery algorithm also runs in time proportional
        to n ln(1/{\epsilon}). Our algorithms have been implemented and work well in
        practice; various implementation issues are discussed}, 
  www_section = {coding theory, recovery algorithm}, 
  issn = {0018-9448}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.107.244.pdf}, 
}
Luby01improvedlow-density
% NOTE(review): fixed OCR garble in the abstract: "rate I R codes" restored to
% "rate 1/2 codes" per the published abstract -- confirm against the paper.
@article{Luby01improvedlow-density,
  title = {Improved low-density parity-check codes using irregular graphs}, 
  author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A.
        Spielman}, 
  journal = {IEEE Trans. Inform. Theory}, 
  volume = {47}, 
  year = {2001}, 
  pages = {585--598}, 
  abstract = {We construct new families of error-correcting codes based on Gallager's
        low-density parity-check codes. We improve on Gallager's results by introducing
        irregular parity-check matrices and a new rigorous analysis of hard-decision
        decoding of these codes. We also provide efficient methods for finding good
        irregular structures for such decoding algorithms. Our rigorous analysis based on
        martingales, our methodology for constructing good irregular codes, and the
        demonstration that irregular structure improves performance constitute key points
        of our contribution. We also consider irregular codes under belief propagation.
        We report the results of experiments testing the efficacy of irregular codes on
        both binary-symmetric and Gaussian channels. For example, using belief
        propagation, for rate 1/2 codes on 16 000 bits over a binary-symmetric channel,
        previous low-density parity-check codes can correct up to approximately 16 \%
        errors, while our codes correct over 17\%. In some cases our results come very
        close to reported results for turbo codes, suggesting that variations of
        irregular low density parity-check codes may be able to match or beat turbo code
        performance. Index Terms{\textemdash}Belief propagation, concentration theorem,
        Gallager codes, irregular codes, low-density parity-check codes}, 
  www_section = {coding theory, low-density parity-check}, 
  issn = {0018-9448}, 
  doi = {10.1109/18.910576}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.137.6057}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/errorsIT.pdf}, 
}
Luby:1997:PLC:258533.258573
% NOTE(review): doi now stores the bare DOI (no resolver prefix); the resolver
% link remains available in the url field.
@conference{Luby:1997:PLC:258533.258573,
  title = {Practical Loss-Resilient Codes}, 
  author = {Luby, Michael and Michael Mitzenmacher and M. Amin Shokrollahi and Daniel A.
        Spielman and Stemann, Volker}, 
  booktitle = {STOC 1997--Proceedings of the 29th annual ACM symposium on Theory of
        computing}, 
  organization = {ACM}, 
  year = {1997}, 
  month = {May}, 
  address = {El Paso, Texas, USA}, 
  pages = {150--159}, 
  publisher = {ACM}, 
  series = {STOC '97}, 
  abstract = {We present a randomized construction of linear-time encodable and decodable
        codes that can transmit over lossy channels at rates extremely close to capacity.
        The encoding and decoding algorithms for these codes have fast and simple
        software implementations. Partial implementations of our algorithms are faster by
        orders of magnitude than the best software implementations of any previous
        algorithm for this problem. We expect these codes will be extremely useful for
        applications such as real-time audio and video transmission over the Internet,
        where lossy channels are common and fast decoding is a requirement. Despite the
        simplicity of the algorithms, their design and analysis are mathematically
        intricate. The design requires the careful choice of a random irregular bipartite
        graph, where the structure of the irregular graph is extremely important. We
        model the progress of the decoding algorithm by a set of differential equations.
        The solution to these equations can then be expressed as polynomials in one
        variable with coefficients determined by the graph structure. Based on these
        polynomials, we design a graph structure that guarantees successful decoding with
        high probability}, 
  www_section = {loss-resilient code}, 
  isbn = {0-89791-888-6}, 
  doi = {10.1145/258533.258573}, 
  url = {http://doi.acm.org/10.1145/258533.258573}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2797\%20-\%20Practical\%20Loss-Resilient\%20Codes.pdf},
}
Machanavajjhala2007
% NOTE(review): the record carried "ERROR: Missing field" markers for
% www_section; filled with topical keywords from the title. Added the commonly
% cited DOI for the TKDD l-diversity article -- confirm both before relying on them.
@article{Machanavajjhala2007,
  title = {$\ell$-diversity: Privacy beyond k-anonymity}, 
  author = {Ashwin Machanavajjhala and Daniel Kifer and Johannes Gehrke and
        Muthuramakrishnan Venkitasubramaniam}, 
  journal = {ACM Transactions on Knowledge Discovery from Data (TKDD)}, 
  volume = {1}, 
  number = {1}, 
  year = {2007}, 
  www_section = {anonymity, k-anonymity, privacy}, 
  doi = {10.1145/1217299.1217302}, 
}
Magharei07meshor
% NOTE(review): fixed typo "multple tree" in www_section; doi now stores the
% bare DOI (no resolver prefix).
@conference{Magharei07meshor,
  title = {Mesh or Multiple-Tree: A Comparative Study of Live P2P Streaming Approaches}, 
  author = {Magharei, Nazanin and Rejaie, Reza}, 
  booktitle = {INFOCOM 2007. 26th IEEE International Conference on Computer
        Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {May}, 
  address = {Anchorage, Alaska, USA}, 
  pages = {1424--1432}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Existing approaches to P2P streaming can be divided into two general classes:
        (i) tree-based approaches use push-based content delivery over multiple
        tree-shaped overlays, and (ii) mesh-based approaches use swarming content
        delivery over a randomly connected mesh. Previous studies have often focused on a
        particular P2P streaming mechanism and no comparison between these two classes
        has been conducted. In this paper, we compare and contrast the performance of
        representative protocols from each class using simulations. We identify the
        similarities and differences between these two approaches. Furthermore, we
        separately examine the behavior of content delivery and overlay construction
        mechanisms for both approaches in static and dynamic scenarios. Our results
        indicate that the mesh-based approach consistently exhibits a superior
        performance over the tree-based approach. We also show that the main factors
        attributing in the inferior performance of the tree-based approach are (i) the
        static mapping of content to a particular tree, and (ii) the placement of each
        peer as an internal node in one tree and as a leaf in all other trees}, 
  www_section = {mesh, multiple tree, overlay, P2P, peer-to-peer networking}, 
  isbn = {1-4244-1047-9}, 
  doi = {10.1109/INFCOM.2007.168}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20Mesh\%20or\%20multiple-tree.pdf},
}
Magharei:2009:PPR:1618562.1618566
% NOTE(review): the original record mixed INFOCOM'07 conference fields
% (booktitle, May 2007, Anchorage) with the journal version's volume, pages,
% ISSN and DOI (IEEE/ACM Transactions on Networking 17(4), 2009, TNET DOI),
% which is what the citation key refers to. Normalized to the journal version;
% confirm number/month against the publisher record.
@article{Magharei:2009:PPR:1618562.1618566,
  title = {PRIME: Peer-to-Peer Receiver-drIven MEsh-based Streaming}, 
  author = {Magharei, Nazanin and Rejaie, Reza}, 
  journal = {IEEE/ACM Transactions on Networking}, 
  volume = {17}, 
  number = {4}, 
  year = {2009}, 
  month = {August}, 
  pages = {1052--1065}, 
  publisher = {IEEE Press}, 
  abstract = {The success of file swarming mechanisms such as BitTorrent has motivated a
        new approach for scalable streaming of live content that we call mesh-based
        Peer-to-Peer (P2P) streaming. In this approach, participating end-systems (or
        peers) form a randomly connected mesh and incorporate swarming content delivery
        to stream live content. Despite the growing popularity of this approach, neither
        the fundamental design tradeoffs nor the basic performance bottlenecks in
        mesh-based P2P streaming are well understood. In this paper, we follow a
        performance-driven approach to design PRIME, a scalable mesh-based P2P streaming
        mechanism for live content. The main design goal of PRIME is to minimize two
        performance bottlenecks, namely bandwidth bottleneck and content bottleneck. We
        show that the global pattern of delivery for each segment of live content should
        consist of a diffusion phase which is followed by a swarming phase. This leads to
        effective utilization of available resources to accommodate scalability and also
        minimizes content bottleneck. Using packet level simulations, we carefully
        examine the impact of overlay connectivity, packet scheduling scheme at
        individual peers and source behavior on the overall performance of the system.
        Our results reveal fundamental design tradeoffs of mesh-based P2P streaming for
        live content}, 
  www_section = {communication network, computer networks, Internet, multimedia
        communication, multimedia systems}, 
  issn = {1063-6692}, 
  doi = {10.1109/TNET.2008.2007434}, 
  url = {http://dx.doi.org/10.1109/TNET.2008.2007434}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2707\%20-\%20PRIME.pdf},
}
Manber94findingsimilar
% NOTE(review): trimmed PDF-extraction junk from the end of the abstract
% ("... 1. Introduction Our goal is ..." had leaked in from the paper body).
@conference{Manber94findingsimilar,
  title = {Finding Similar Files in a Large File System}, 
  author = {Udi Manber}, 
  booktitle = {USENIX WINTER 1994 TECHNICAL CONFERENCE}, 
  year = {1994}, 
  pages = {1--10}, 
  abstract = {We present a tool, called sif, for finding all similar files in a large file
        system. Files are considered similar if they have significant number of common
        pieces, even if they are very different otherwise. For example, one file may be
        contained, possibly with some changes, in another file, or a file may be a
        reorganization of another file. The running time for finding all groups of
        similar files, even for as little as 25\% similarity, is on the order of 500MB to
        1GB an hour. The amount of similarity and several other customized parameters can
        be determined by the user at a post-processing stage, which is very fast. Sif can
        also be used to very quickly identify all similar files to a query file using a
        preprocessed index. Application of sif can be found in file management,
        information collecting (to remove duplicates), program reuse, file
        synchronization, data compression, and maybe even plagiarism detection}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.12.3222}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3222.pdf}, 
  www_section = {Unsorted}, 
}
MarPi08
% NOTE(review): restored Polish diacritics in the author names using the
% LaTeX-escape convention this file already uses elsewhere (Margasi{\'n}ski,
% Pi{\'o}ro) -- confirm against the published paper.
@conference{MarPi08,
  title = {A Concept of an Anonymous Direct P2P Distribution Overlay System}, 
  author = {Igor Margasi{\'n}ski and Micha{\l} Pi{\'o}ro}, 
  booktitle = {Proceedings of IEEE 22nd International Conference on Advanced Information
        Networking and Applications (AINA)}, 
  organization = {IEEE Computer Society Press}, 
  year = {2008}, 
  month = {March}, 
  address = {Gino-wan, Okinawa, Japan}, 
  pages = {590--597}, 
  publisher = {IEEE Computer Society Press}, 
  abstract = {The paper introduces a peer-to-peer system called P2PRIV (peer-to-peer direct
        and anonymous distribution overlay). Basic novel features of P2PRIV are: (i) a
        peer-to-peer parallel content exchange architecture, and (ii) separation of the
        anonymization process from the transport function. These features allow a
        considerable saving of service time while preserving high degree of anonymity. In
        the paper we evaluate anonymity measures of P2PRIV (using a normalized entropy
        measurement model) as well as its traffic measures (including service time and
        network dynamics), and compare anonymity and traffic performance of P2PRIV with a
        well known system called CROWDS}, 
  www_section = {communication system security, privacy}, 
  isbn = {978-0-7695-3095-6}, 
  doi = {10.1109/AINA.2008.117}, 
  url = {http://portal.acm.org/citation.cfm?id=1395079.1395235}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MarPi08.pdf}, 
}
Marks2010a
% NOTE(review): the url field is a placeholder pointing at the bibliography
% site itself, not at the paper -- replace with a publisher/author copy when
% known. Added month/venue of LCN 2010 -- confirm against the IEEE record.
@conference{Marks2010a,
  title = {Unleashing Tor, BitTorrent \& Co.: How to Relieve TCP Deficiencies in
        Overlays}, 
  author = {Daniel Marks and Florian Tschorsch and Bjoern Scheuermann}, 
  booktitle = {LCN 2010: Proceedings of the 35th IEEE Conference on Local Computer
        Networks}, 
  year = {2010}, 
  month = {October}, 
  address = {Denver, Colorado, USA}, 
  url = {https://bibliography.gnunet.org}, 
  www_section = {Unsorted}, 
}
Marx:2006:PGS:1140638.1140647
% NOTE(review): record looks complete and consistent (bare DOI, ACM DL url,
% double-hyphen page range). address {Essex, UK} is the publisher's (Elsevier)
% address, matching this file's convention for journal entries.
@article{Marx:2006:PGS:1140638.1140647,
  title = {Parameterized graph separation problems}, 
  author = {Marx, D{\'a}niel}, 
  journal = {Theoretical Computer Science}, 
  volume = {351}, 
  year = {2006}, 
  month = {February}, 
  address = {Essex, UK}, 
  pages = {394--406}, 
  publisher = {Elsevier Science Publishers Ltd}, 
  abstract = {We consider parameterized problems where some separation property has to be
        achieved by deleting as few vertices as possible. The following five problems are
        studied: delete k vertices such that (a) each of the given l terminals is
        separated from the others, (b) each of the given l pairs of terminals is
        separated, (c) exactly l vertices are cut away from the graph, (d) exactly l
        connected vertices are cut away from the graph, (e) the graph is separated into
        at least l components. We show that if both k and l are parameters, then (a), (b)
        and (d) are fixed-parameter tractable, while (c) and (e) are W[1]-hard}, 
  www_section = {multicasting, multiway cut, parameterized complexity, separator}, 
  issn = {0304-3975}, 
  doi = {10.1016/j.tcs.2005.10.007}, 
  url = {http://dl.acm.org/citation.cfm?id=1140638.1140647}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Marx\%20-\%20Parameterized\%20graph\%20separation\%20problems.pdf},
}
Massachusetts05peer-to-peercommunication
% NOTE(review): author order corrected to match the USENIX ATC '05 publication
% (Bryan Ford, Pyda Srisuresh, Dan Kegel) -- confirm against the proceedings.
% Also repaired the truncated word "nd" -> "find" in the abstract.
@conference{Massachusetts05peer-to-peercommunication,
  title = {Peer-to-Peer Communication Across Network Address Translators}, 
  author = {Bryan Ford and Pyda Srisuresh and Dan Kegel}, 
  booktitle = {ATEC05. Proceedings of the USENIX Annual Technical Conference}, 
  organization = {USENIX Association}, 
  year = {2005}, 
  month = {April}, 
  address = {Anaheim, CA}, 
  pages = {179--192}, 
  publisher = {USENIX Association}, 
  abstract = {Network Address Translation (NAT) causes well-known difficulties for
        peer-to-peer (P2P) communication, since the peers involved may not be reachable
        at any globally valid IP address. Several NAT traversal techniques are known, but
        their documentation is slim, and data about their robustness or relative merits
        is slimmer. This paper documents and analyzes one of the simplest but most robust
        and practical NAT traversal techniques, commonly known as hole punching. Hole
        punching is moderately well-understood for UDP communication, but we show how it
        can be reliably used to set up peer-to-peer TCP streams as well. After gathering
        data on the reliability of this technique on a wide variety of deployed NATs, we
        find that about 82\% of the NATs tested support hole punching for UDP, and about
        64\% support hole punching for TCP streams. As NAT vendors become increasingly
        conscious of the needs of important P2P applications such as Voice over IP and
        online gaming protocols, support for hole punching is likely to increase in the
        future}, 
  www_section = {communication network, ip address, NAT, nat traversal techniques, network
        address translation, P2P, peer-to-peer networking}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.59.6799\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.59.6799.pdf}, 
}
Massoulie:2005:CRS:1064212.1064215
% NOTE(review): doi now stores the bare DOI (no resolver prefix); the resolver
% link remains available in the url field.
@conference{Massoulie:2005:CRS:1064212.1064215,
  title = {Coupon replication systems}, 
  author = {Massouli{\'e}, Laurent and Vojnovi{\'c}, Milan}, 
  booktitle = {SIGMETRICS'05. Proceedings of the 2005 ACM SIGMETRICS International
        Conference on Measurement and Modeling of Computer Systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = {June}, 
  address = {Banff, Alberta, Canada}, 
  pages = {2--13}, 
  publisher = {ACM}, 
  series = {SIGMETRICS '05}, 
  abstract = {Motivated by the study of peer-to-peer file swarming systems {\`a} la
        BitTorrent, we introduce a probabilistic model of coupon replication systems.
        These systems consist of users, aiming to complete a collection of distinct
        coupons. Users are characterised by their current collection of coupons, and
        leave the system once they complete their coupon collection. The system evolution
        is then specified by describing how users of distinct types meet, and which
        coupons get replicated upon such encounters.For open systems, with exogenous user
        arrivals, we derive necessary and sufficient stability conditions in a layered
        scenario, where encounters are between users holding the same number of coupons.
        We also consider a system where encounters are between users chosen uniformly at
        random from the whole population. We show that performance, captured by sojourn
        time, is asymptotically optimal in both systems as the number of coupon types
        becomes large.We also consider closed systems with no exogenous user arrivals. In
        a special scenario where users have only one missing coupon, we evaluate the size
        of the population ultimately remaining in the system, as the initial number of
        users, N, goes to infinity. We show that this decreases geometrically with the
        number of coupons, K. In particular, when the ratio K/log(N) is above a critical
        threshold, we prove that this number of left-overs is of order log(log(N)).These
        results suggest that performance of file swarming systems does not depend
        critically on either altruistic user behavior, or on load balancing strategies
        such as rarest first}, 
  www_section = {content distribution, file swarming, peer-to-peer networking}, 
  isbn = {1-59593-022-1}, 
  doi = {10.1145/1064212.1064215}, 
  url = {http://doi.acm.org/10.1145/1064212.1064215}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGMETRICS\%2705\%20-\%20Coupon\%20replication\%20systems.pdf},
}
Massoulie:2006:PCS:1146381.1146402
% NOTE(review): doi now stores the bare DOI (no resolver prefix); the resolver
% link remains available in the url field.
@conference{Massoulie:2006:PCS:1146381.1146402,
  title = {Peer counting and sampling in overlay networks: random walk methods}, 
  author = {Massouli{\'e}, Laurent and Erwan Le Merrer and Anne-Marie Kermarrec and Ganesh,
        Ayalvadi}, 
  booktitle = {PODC '06--Proceedings of the 25th Annual ACM Symposium on Principles of
        Distributed Computing}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {July}, 
  address = {Denver, Colorado, USA}, 
  pages = {123--132}, 
  publisher = {ACM}, 
  series = {PODC '06}, 
  abstract = {In this article we address the problem of counting the number of peers in a
        peer-to-peer system, and more generally of aggregating statistics of individual
        peers over the whole system. This functionality is useful in many applications,
        but hard to achieve when each node has only a limited, local knowledge of the
        whole system. We propose two generic techniques to solve this problem. The Random
        Tour method is based on the return time of a continuous time random walk to the
        node originating the query. The Sample and Collide method is based on counting
        the number of random samples gathered until a target number of redundant samples
        are obtained. It is inspired by the "birthday paradox" technique of [6], upon
        which it improves by achieving a target variance with fewer samples. The latter
        method relies on a sampling sub-routine which returns randomly chosen peers. Such
        a sampling algorithm is of independent interest. It can be used, for instance,
        for neighbour selection by new nodes joining the system. We use a continuous time
        random walk to obtain such samples. We analyse the complexity and accuracy of the
        two methods. We illustrate in particular how expansion properties of the overlay
        affect their performance}, 
  www_section = {expander graphs, random walks, sampling}, 
  isbn = {1-59593-384-0}, 
  doi = {10.1145/1146381.1146402}, 
  url = {http://doi.acm.org/10.1145/1146381.1146402}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PODC\%2706\%20-\%20Peer\%20counting\%20and\%20sampling\%20in\%20overlay\%20networks.pdf},
}
Maymounkov02kademlia:a
% NOTE(review): fixed truncated word in the booktitle: "Peer-to-Peer System"
% -> "Peer-to-Peer Systems" (the IPTPS workshop's published title).
@conference{Maymounkov02kademlia:a,
  title = {Kademlia: A Peer-to-peer Information System Based on the XOR Metric}, 
  author = {Petar Maymounkov and David Mazi{\`e}res}, 
  booktitle = {IPTPS '01--Revised Papers from the First International Workshop on
        Peer-to-Peer Systems}, 
  organization = {Springer-Verlag}, 
  volume = {2429}, 
  year = {2002}, 
  month = {March}, 
  address = {Cambridge, MA, USA}, 
  pages = {53--65}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {We describe a peer-to-peer distributed hash table with provable consistency
        and performance in a fault-prone environment. Our system routes queries and
        locates nodes using a novel XOR-based metric topology that simplifies the
        algorithm and facilitates our proof. The topology has the property that every
        message exchanged conveys or reinforces useful contact information. The system
        exploits this information to send parallel, asynchronous query messages that
        tolerate node failures without imposing timeout delays on users}, 
  www_section = {distributed hash table, fault-tolerance, Kademlia, P2P}, 
  isbn = {3-540-44179-4}, 
  doi = {10.1007/3-540-45748-8_5}, 
  url = {http://www.springerlink.com/content/2ekx2a76ptwd24qt/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kpos_0.pdf}, 
}
Maymounkov02onlinecodes
% NOTE(review): this work circulated as an NYU technical report (TR2002-833,
% 2002); consider converting to @techreport with institution/number fields --
% confirm the report number before changing.
@booklet{Maymounkov02onlinecodes,
  title = {Online codes (Extended Abstract)}, 
  author = {Petar Maymounkov}, 
  year = {2002}, 
  abstract = {We introduce online codes -- a class of near-optimal codes for a very general
        loss channel which we call the free channel. Online codes are linear
        encoding/decoding time codes, based on sparse bipartite graphs, similar to
        Tornado codes, with a couple of novel properties: local encodability and
        rateless-ness. Local encodability is the property that each block of the encoding
        of a message can be computed independently from the others in constant time. This
        also implies that each encoding block is only dependent on a constant-sized part
        of the message and a few preprocessed bits. Rateless-ness is the property that
        each message has an encoding of practically infinite size. We argue that rateless
        codes are more appropriate than fixed-rate codes for most situations where
        erasure codes were considered a solution. Furthermore, rateless codes meet new
        areas of application, where they are not replaceable by fixed-rate codes. One
        such area is information dispersal over peer-to-peer networks}, 
  www_section = {coding theory, local encodability, rateless-ness, sparse bipartite
        graphs}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.112.1333}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.112.1333.pdf}, 
}
Mazieres:2002:BSF:571825.571840
@conference{Mazieres:2002:BSF:571825.571840,
  title = {Building secure file systems out of Byzantine storage}, 
  author = {Mazi{\`e}res, David and Shasha, Dennis}, 
  booktitle = {PODC'02--Proceedings of the 21st Annual Symposium on Principles of
        Distributed Computing}, 
  organization = {ACM}, 
  year = {2002}, 
  month = {July}, 
  address = {Monterey, CA, USA}, 
  pages = {108--117}, 
  publisher = {ACM}, 
  series = {PODC '02}, 
  abstract = {This paper shows how to implement a trusted network file system on an
        untrusted server. While cryptographic storage techniques exist that allow users
        to keep data secret from untrusted servers, this work concentrates on the
        detection of tampering attacks and stale data. Ideally, users of an untrusted
        storage server would immediately and unconditionally notice any misbehavior on
        the part of the server. This ideal is unfortunately not achievable. However, we
        define a notion of data integrity called fork consistency in which, if the server
        delays just one user from seeing even a single change by another, the two users
        will never again see one another's changes---a failure easily detectable with
        on-line communication. We give a practical protocol for a multi-user network file
        system called SUNDR, and prove that SUNDR offers fork consistency whether or not
        the server obeys the protocol}, 
  www_section = {Byzantine storage, detection, secure file system, stale data, tampering
        attack, trusted network, untrusted server}, 
  isbn = {1-58113-485-1}, 
  doi = {10.1145/571825.571840}, 
  url = {http://doi.acm.org/10.1145/571825.571840}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PODC\%2702\%20-\%20Building\%20secure\%20file\%20systems\%20out\%20of\%20Byzantine\%20storage.pdf},
}
Md06e.:anonymous
@conference{Md06e.:anonymous,
  title = {Anonymous Secure Communication in Wireless Mobile Ad-hoc Networks}, 
  author = {Sk. Md. Mizanur Rahman and Atsuo Inomata and Takeshi Okamoto and Masahiro
        Mambo}, 
  booktitle = {Proceedings of the First International Conference on Ubiquitous
        Convergence Technology}, 
  organization = {Springer}, 
  year = {2006}, 
  pages = {131--140}, 
  publisher = {Springer}, 
  abstract = {The main characteristic of a mobile ad-hoc network is its
        infrastructure-less, highly dynamic topology, which is subject to malicious
        traffic analysis. Malicious intermediate nodes in wireless mobile ad-hoc networks
        are a threat concerning security as well as anonymity of exchanged information.
        To protect anonymity and achieve security of nodes in mobile ad-hoc networks, an
        anonymous on-demand routing protocol, termed RIOMO, is proposed. For this
        purpose, pseudo IDs of the nodes are generated considering Pairing-based
        Cryptography. Nodes can generate their own pseudo IDs independently. As a result
        RIOMO reduces pseudo IDs maintenance costs. Only trust-worthy nodes are allowed
        to take part in routing to discover a route. To ensure trustiness each node has
        to make authentication to its neighbors through an anonymous authentication
        process. Thus RIOMO safely communicates between nodes without disclosing node
        identities; it also provides different desirable anonymous properties such as
        identity privacy, location privacy, route anonymity, and robustness against
        several attacks}, 
  www_section = {ad-hoc networks, anonymity, routing}, 
  doi = {10.1007/978-3-540-71789-8}, 
  url = {http://www.springerlink.com/content/g6334148068w1254/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.74.1585.pdf}, 
}
Member_enablingadaptive
@article{Member_enablingadaptive,
  title = {Enabling Adaptive Video Streaming in P2P Systems}, 
  author = {Dan Jurca and Jacob Chakareski and Jean-Paul Wagner and Pascal Frossard}, 
  journal = {IEEE Communications Magazine}, 
  volume = {45}, 
  year = {2007}, 
  pages = {108--114}, 
  abstract = {Peer-to-peer (P2P) systems are becoming increasingly popular due to their
        ability to deliver large amounts of data at a reduced deployment cost. In
        addition to fostering the development of novel media applications, P2P systems
        also represent an interesting alternative paradigm for media streaming
        applications that can benefit from the inherent self organization and resource
        scalability available in such environments. This article presents an overview of
        application and network layer mechanisms that enable successful streaming
        frameworks in peer-to-peer systems. We describe media delivery architectures that
        can be deployed over P2P networks to address the specific requirements of
        streaming applications. In particular, we show how video-streaming applications
        can benefit from the diversity offered by P2P systems and implement
        distributed-streaming and scheduling solutions with multi-path packet
        transmission}, 
  www_section = {distributed packet scheduling, flexible media encoding, path diversity,
        peer-to-peer networking}, 
  issn = {0163-6804}, 
  doi = {10.1109/MCOM.2007.374427}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Communications\%20Magazine\%20-\%20Video\%20Streaming\%20in\%20P2P\%20Systems.pdf},
}
Michiardi01core:a
@conference{Michiardi01core:a,
  title = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in Mobile
        Ad hoc Networks}, 
  author = {Pietro Michiardi and Refik Molva}, 
  booktitle = {CORE: A Collaborative Reputation Mechanism to enforce node cooperation in
        Mobile Ad hoc Networks}, 
  year = {2001}, 
  pages = {107--121}, 
  abstract = {Countermeasures for node misbehavior and selfishness are mandatory
        requirements in MANET. Selfishness that causes lack of node activity cannot be
        solved by classical security means that aim at verifying the correctness and
        integrity of an operation. We suggest a generic mechanism based on reputation to
        enforce cooperation among the nodes of a MANET to prevent selfish behavior. Each
        network entity keeps track of other entities' collaboration using a technique
        called reputation. The reputation is calculated based on various types of
        information on each entity's rate of collaboration. Since there is no incentive
        for a node to maliciously spread negative information about other nodes, simple
        denial of service attacks using the collaboration technique itself are prevented.
        The generic mechanism can be smoothly extended to basic network functions with
        little impact on existing protocols}, 
  www_section = {ad-hoc networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.4100}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/michpi-020801.pdf}, 
}
Miller06robustcomposition:
@booklet{Miller06robustcomposition:,
  title = {Robust Composition: Towards a Unified Approach to Access Control and Concurrency
        Control}, 
  author = {Mark Samuel Miller}, 
  year = {2006}, 
  abstract = {Permission is hereby granted to make and distribute verbatim copies of this
        document without royalty or fee. Permission is granted to quote excerpts from
        this document provided the original source is properly cited. When
        separately written programs are composed so that they may cooperate, they may
        instead destructively interfere in unanticipated ways. These hazards limit the
        scale and functionality of the software systems we can successfully compose. This
        dissertation presents a framework for enabling those interactions between
        components needed for the cooperation we intend, while minimizing the hazards of
        destructive interference. Great progress on the composition problem has been made
        within the object paradigm, chiefly in the context of sequential, single-machine
        programming among benign components. We show how to extend this success to
        support robust composition of concurrent and potentially malicious components
        distributed over potentially malicious machines. We present E, a distributed,
        persistent, secure programming language, and CapDesk, a virus-safe desktop built
        in E, as embodiments of the techniques we explain}, 
  www_section = {robustness}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.4674}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.101.4674.pdf}, 
}
Minsky00setreconciliation
@conference{Minsky00setreconciliation,
  title = {Set Reconciliation with Nearly Optimal Communication Complexity}, 
  author = {Yaron Minsky and Ari Trachtenberg and Richard Zippel}, 
  booktitle = {International Symposium on Information Theory}, 
  year = {2000}, 
  pages = {0--232}, 
  www_section = {set reconciliation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reconcile.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
Minsky02practicalset
@booklet{Minsky02practicalset,
  title = {Practical Set Reconciliation}, 
  author = {Yaron Minsky and Ari Trachtenberg}, 
  year = {2002}, 
  www_section = {set reconciliation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/practical.pdf}, 
}
Mislove04ap3:cooperative
@conference{Mislove04ap3:cooperative,
  title = {AP3: Cooperative, decentralized anonymous communication}, 
  author = {Mislove, Alan and Oberoi, Gaurav and Post, Ansley and Reis, Charles and
        Druschel, Peter and Wallach, Dan S}, 
  booktitle = {IN PROC. OF SIGOPS EUROPEAN WORKSHOP}, 
  year = {2004}, 
  abstract = {This paper describes a cooperative overlay network that provides anonymous
        communication services for participating users. The Anonymizing Peer-to-Peer
        Proxy (AP3) system provides clients with three primitives: (i) anonymous message
        delivery, (ii) anonymous channels, and (iii) secure pseudonyms. AP3 is designed
        to be lightweight, low-cost and provides "probable innocence" anonymity to
        participating users, even under a large-scale coordinated attack by a limited
        fraction of malicious overlay nodes. Additionally, we use AP3's primitives to
        build novel anonymous group communication facilities (multicast and anycast),
        which shield the identity of both publishers and subscribers}, 
  www_section = {anonymity, Peer-to-Peer Proxy}, 
  doi = {10.1145/1133572.1133578}, 
  url = {http://portal.acm.org/citation.cfm?id=1133578}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.61.6219.pdf}, 
}
Montenegro02statisticallyunique
@booklet{Montenegro02statisticallyunique,
  title = {Statistically Unique and Cryptographically Verifiable (SUCV) Identifiers and
        Addresses}, 
  author = {Gabriel Montenegro}, 
  year = {2002}, 
  abstract = {This paper addresses the identifier ownership problem. It does so by using
        characteristics of Statistic Uniqueness and Cryptographic Verifiability (SUCV) of
        certain entities which this document calls SUCV Identifiers and Addresses. Their
        characteristics allow them to severely limit certain classes of denial of service
        attacks and hijacking attacks. SUCV addresses are particularly applicable to
        solve the address ownership problem that hinders mechanisms like Binding Updates
        in Mobile IPv6}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.16.1456}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.16.1456.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Moore05counteringhidden-action
@conference{Moore05counteringhidden-action,
  title = {Countering Hidden-action Attacks on Networked Systems}, 
  author = {Tyler Moore}, 
  booktitle = {WEIS'05. Fourth Workshop on the Economics of Information Security}, 
  year = {2005}, 
  month = {June}, 
  address = {Cambridge, England}, 
  abstract = {We define an economic category of hidden-action attacks: actions made
        attractive by a lack of observation. We then consider its implications for
        computer systems. Rather than structure contracts to compensate for incentive
        problems, we rely on insights from social capital theory to design network
        topologies and interactions that undermine the potential for hidden-action
        attacks}, 
  www_section = {asymmetric information, computer security, decentralized, economics,
        information security, moral hazard, social capital}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.119.8132}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WEIS\%2705\%20-\%20Moore\%20-\%20Counterin\%20hidden-action\%20attacks.pdf},
}
Mui:2002:CMT:820745.821158
@conference{Mui:2002:CMT:820745.821158,
  title = {A Computational Model of Trust and Reputation}, 
  author = {Lik Mui and Mojdeh Mohtashemi and Ari Halberstadt}, 
  booktitle = {HICSS'02. Proceedings of the 35th Annual Hawaii International Conference on
        System Sciences}, 
  organization = {IEEE Computer Society}, 
  year = {2002}, 
  month = {January}, 
  address = {Big Island, Hawaii, USA}, 
  pages = {2431--2439}, 
  publisher = {IEEE Computer Society}, 
  series = {HICSS '02}, 
  abstract = {Despite their many advantages, e-businesses lag behind brick and mortar
        businesses in several fundamental respects. This paper concerns one of these:
        relationships based on trust and reputation. Recent studies on simple reputation
        systems for e-Businesses such as eBay have pointed to the importance of such
        rating systems for deterring moral hazard and encouraging trusting interactions.
        However, despite numerous studies on trust and reputation systems, few have taken
        studies across disciplines to provide an integrated account of these concepts and
        their relationships. This paper first surveys existing literatures on trust,
        reputation and a related concept: reciprocity. Based on sociological and
        biological understandings of these concepts, a computational model is proposed.
        This model can be implemented in a real system to consistently calculate agents'
        trust and reputation scores}, 
  www_section = {e-business, moral hazard, reciprocity, reputation, trust}, 
  isbn = {0-7695-1435-9}, 
  doi = {10.1109/HICSS.2002.994181}, 
  url = {http://dl.acm.org/citation.cfm?id=820745.821158}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/HICSS\%2702\%20-\%20A\%20computational\%20model\%20of\%20trust\%20and\%20reputation.pdf},
}
Murillo:2011:SCT:1938287.1938323
@article{Murillo:2011:SCT:1938287.1938323,
  title = {Schedule coordination through egalitarian recurrent multi-unit combinatorial
        auctions}, 
  author = {Murillo, Javier and Mu{\~n}oz, V{\'\i}ctor and Busquets, D{\'\i}dac and
        L{\'o}pez, Beatriz}, 
  journal = {Applied Intelligence}, 
  volume = {34}, 
  number = {1}, 
  year = {2011}, 
  month = {April}, 
  address = {Hingham, MA, USA}, 
  pages = {47--63}, 
  publisher = {Kluwer Academic Publishers}, 
  abstract = {When selfish industries are competing for limited shared resources, they need
        to coordinate their activities to handle possible conflicting situations.
        Moreover, this coordination should not affect the activities already planned by
        the industries, since this could have negative effects on their performance.
        Although agents may have buffers that allow them to delay the use of resources,
        these are of a finite capacity, and therefore cannot be used indiscriminately.
        Thus, we are faced with the problem of coordinating schedules that have already
        been generated by the agents. To address this task, we propose to use a recurrent
        auction mechanism to mediate between the agents. Through this auction mechanism,
        the agents can express their interest in using the resources, thus helping the
        scheduler to find the best distribution. We also introduce a priority mechanism
        to add fairness to the coordination process. The proposed coordination mechanism
        has been applied to a waste water treatment system scenario, where different
        industries need to discharge their waste. We have simulated the behavior of the
        system, and the results show that using our coordination mechanism the waste
        water treatment plant can successfully treat most of the discharges, while the
        production activity of the industries is almost not affected by it}, 
  www_section = {auction mechanisms, auctions, economy, egalitarism, schedule
        coordination}, 
  issn = {0924-669X}, 
  doi = {10.1007/s10489-009-0178-7}, 
  url = {http://dx.doi.org/10.1007/s10489-009-0178-7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Applied\%20Intelligence\%20-\%20Combinatorial\%20Auctions.pdf},
}
Muthitacharoen02ivy:a
@conference{Muthitacharoen02ivy:a,
  title = {Ivy: A Read/Write Peer-to-Peer File System}, 
  author = {Muthitacharoen, Athicha and Robert Morris and Thomer M. Gil and Benjie Chen}, 
  booktitle = {Ivy: A Read/Write Peer-to-Peer File System}, 
  year = {2002}, 
  pages = {31--44}, 
  abstract = {Ivy is a multi-user read/write peer-to-peer file system. Ivy has no
        centralized or dedicated components, and it provides useful integrity properties
        without requiring users to fully trust either the underlying peer-to-peer storage
        system or the other users of the file system}, 
  www_section = {distributed storage, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.20.2147}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.20.2147.pdf}, 
}
Naor03asimple
@conference{Naor03asimple,
  title = {A Simple Fault Tolerant Distributed Hash Table}, 
  author = {Moni Naor and Udi Wieder}, 
  booktitle = {Second International Workshop on Peer-to-Peer Systems}, 
  year = {2003}, 
  pages = {88--97}, 
  abstract = {We introduce a distributed hash table (DHT) with logarithmic degree and
        logarithmic dilation. We show two lookup algorithms. The first has a message
        complexity of and is robust under random deletion of nodes. The second has
        parallel time of and message complexity of . It is robust under spam induced by a
        random subset of the nodes. We then show a construction which is fault tolerant
        against random deletions and has an optimal degree-dilation tradeoff. The
        construction has improved parameters when compared to other DHTs. Its main merits
        are its simplicity, its flexibility and the fresh ideas introduced in its design.
        It is very easy to modify and to add more sophisticated protocols, such as
        dynamic caching and erasure correcting codes}, 
  www_section = {distributed hash table, fault-tolerance}, 
  doi = {10.1007/b11823}, 
  url = {http://www.springerlink.com/content/4e756fgyq4ff4kay/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.3388.pdf}, 
}
Ng:2004:NPS:1247415.1247426
@conference{Ng:2004:NPS:1247415.1247426,
  title = {A Network Positioning System for the Internet}, 
  author = {Ng, T. S. Eugene and Zhang, Hui}, 
  booktitle = {ATEC'04. Proceedings of the Annual Conference on USENIX Annual Technical
        Conference}, 
  organization = {USENIX Association}, 
  year = {2004}, 
  month = {June}, 
  address = {Boston, Massachusetts, USA}, 
  pages = {11--11}, 
  publisher = {USENIX Association}, 
  series = {ATEC '04}, 
  abstract = {Network positioning has recently been demonstrated to be a viable concept to
        represent the network distance relationships among Internet end hosts. Several
        subsequent studies have examined the potential benefits of using network position
        in applications, and proposed alternative network positioning algorithms. In this
        paper, we study the problem of designing and building a network positioning
        system (NPS). We identify several key system-building issues such as the
        consistency, adaptivity and stability of host network positions over time. We
        propose a hierarchical network positioning architecture that maintains
        consistency while enabling decentralization, a set of adaptive decentralized
        algorithms to compute and maintain accurate, stable network positions, and
        finally present a prototype system deployed on PlanetLab nodes that can be used
        by a variety of applications. We believe our system is a viable first step to
        provide a network positioning capability in the Internet}, 
  www_section = {Internet, network positioning algorithms, network positioning system,
        nps}, 
  url = {http://dl.acm.org/citation.cfm?id=1247415.1247426}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ATEC\%2704\%20-\%20A\%20network\%20positioning\%20system.pdf},
}
Nielson05ataxonomy
@conference{Nielson05ataxonomy,
  title = {A Taxonomy of Rational Attacks}, 
  author = {Seth James Nielson and Scott A. Crosby}, 
  booktitle = {Proceedings of the 4th International Workshop on Peer-to-Peer Systems (IPTPS
        '05)}, 
  organization = {Springer-Verlag}, 
  year = {2005}, 
  pages = {36--46}, 
  publisher = {Springer-Verlag}, 
  abstract = {For peer-to-peer services to be effective, participating nodes must
        cooperate, but in most scenarios a node represents a self-interested party and
        cooperation can neither be expected nor enforced. A reasonable assumption is that
        a large fraction of p2p nodes are rational and will attempt to maximize their
        consumption of system resources while minimizing the use of their own. If such
        behavior violates system policy then it constitutes an attack. In this paper we
        identify and create a taxonomy for rational attacks and then identify
        corresponding solutions if they exist. The most effective solutions directly
        incentivize cooperative behavior, but when this is not feasible the common
        alternative is to incentivize evidence of cooperation instead}, 
  www_section = {attack, P2P}, 
  doi = {10.1007/11558989}, 
  url = {http://www.springerlink.com/content/lh21385ml723844j/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CameraReady_240.pdf}, 
}
Nisan:1999:AMD:301250.301287
@conference{Nisan:1999:AMD:301250.301287,
  title = {Algorithmic Mechanism Design}, 
  author = {Nisan, Noam and Ronen, Amir}, 
  booktitle = {STOC'99. Proceedings of the thirty-first Annual ACM Symposium On Theory of
        Computing}, 
  organization = {ACM}, 
  year = {1999}, 
  month = {May}, 
  address = {Atlanta, Georgia, USA}, 
  pages = {129--140}, 
  publisher = {ACM}, 
  series = {STOC '99}, 
  abstract = {We consider algorithmic problems in a distributed setting where the
        participants cannot be assumed to follow the algorithm but rather their own
        self-interest. As such participants, termed agents, are capable of manipulating
        the algorithm, the algorithm designer should ensure in advance that the agents '
        interests are best served by behaving correctly. Following notions from the field
        of mechanism design, we suggest a framework for studying such algorithms. Our
        main technical contribution concerns the study of a representative task
        scheduling problem for which the standard mechanism design tools do not suffice}, 
  www_section = {algorithms, mechanism design}, 
  isbn = {1-58113-067-8}, 
  doi = {10.1145/301250.301287}, 
  url = {http://doi.acm.org/10.1145/301250.301287}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STOC\%2799\%20-\%20Nisan\%20\%26\%20Ronen\%20-\%20Algorithmic\%20mechanism\%20design.pdf},
}
Nisan:1999:ASA:1764891.1764893
@conference{Nisan:1999:ASA:1764891.1764893,
  title = {Algorithms for Selfish Agents}, 
  author = {Nisan, Noam}, 
  booktitle = {STACS'99. Symposium on Theoretical Aspects of Computer Science}, 
  organization = {Springer-Verlag}, 
  year = {1999}, 
  month = {March}, 
  address = {Trier, Germany}, 
  pages = {1--15}, 
  publisher = {Springer-Verlag}, 
  series = {STACS'99}, 
  abstract = {This paper considers algorithmic problems in a distributed setting where the
        participants cannot be assumed to follow the algorithm but rather their own
        self-interest. Such scenarios arise, in particular, when computers or users aim
        to cooperate or trade over the Internet. As such participants, termed agents, are
        capable of manipulating the algorithm, the algorithm designer should ensure in
        advance that the agents' interests are best served by behaving correctly. This
        exposition presents a model to formally study such algorithms. This model, based
        on the field of mechanism design, is taken from the author's joint work with Amir
        Ronen, and is similar to approaches taken in the distributed AI community in
        recent years. Using this model, we demonstrate how some of the techniques of
        mechanism design can be applied towards distributed computation problems. We then
        exhibit some issues that arise in distributed computation which require going
        beyond the existing theory of mechanism design}, 
  www_section = {algorithms, mechanism design, selfish agent}, 
  isbn = {3-540-65691-X}, 
  url = {http://dl.acm.org/citation.cfm?id=1764891.1764893}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/STACS\%2799\%20-\%20Nisan\%20-\%20Algorithms\%20for\%20selfish\%20agents.pdf},
}
Ogata97faulttolerant
@book{Ogata97faulttolerant,
  title = {Fault Tolerant Anonymous Channel}, 
  author = {Wakaha Ogata and Kaoru Kurosawa and Kazue Sako and Kazunori Takatani}, 
  booktitle = {Information and Communications Security}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {1334/1997}, 
  year = {1997}, 
  pages = {440--444}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This paper describes a zero-knowledge proof that a mix in onion routing can
        perform in order to proof that it did route the messages properly. This allows
        the deployment of a mix-net where malicious mixes can be detected without using
        dummy-traffic to probe for correctness. Technical}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.19.357\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fault.dvi_.pdf}, 
  www_section = {Unsorted}, 
}
Ostrovsky:2007:AEN:1315245.1315270
@conference{Ostrovsky:2007:AEN:1315245.1315270,
  title = {Attribute-based encryption with non-monotonic access structures}, 
  author = {Rafail Ostrovsky and Amit Sahai and Waters, Brent}, 
  booktitle = {CCS'07--Proceedings of the 14th ACM Conference on Computer and
        Communications Security}, 
  organization = {ACM}, 
  year = {2007}, 
  month = {October}, 
  address = {Alexandria, VA, USA}, 
  pages = {195--203}, 
  publisher = {unknown}, 
  series = {CCS '07}, 
  abstract = {We construct an Attribute-Based Encryption (ABE) scheme that allows a user's
        private key to be expressed in terms of any access formula over attributes.
        Previous ABE schemes were limited to expressing only monotonic access structures.
        We provide a proof of security for our scheme based on the Decisional Bilinear
        Diffie-Hellman (BDH) assumption. Furthermore, the performance of our new scheme
        compares favorably with existing, less-expressive schemes}, 
  www_section = {ABE, BDH, Decisional bilinear diffie-hellman, encryption, non-monotonic
        access}, 
  isbn = {978-1-59593-703-2}, 
  doi = {10.1145/1315245.1315270}, 
  url = {http://doi.acm.org/10.1145/1315245.1315270}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2707\%20-\%20ABE\%20with\%20non-monotonic\%20access\%20structures.pdf},
}
Oswald02capacity-achievingsequences
@article{Oswald02capacity-achievingsequences,
  title = {Capacity-achieving sequences for the erasure channel}, 
  author = {Peter Oswald and M. Amin Shokrollahi}, 
  journal = {IEEE Trans. Information Theory}, 
  volume = {48}, 
  year = {2002}, 
  month = {December}, 
  pages = {3017--3028}, 
  abstract = {This paper starts a systematic study of capacity-achieving sequences of
        low-density paritycheck codes for the erasure channel. We introduce a class A of
        analytic functions and develop a procedure to obtain degree distributions for the
        codes. We show various properties of this class which will help us construct new
        distributions from old ones. We then study certain types of capacity-achieving
        sequences and introduce new measures for their optimality. For instance, it turns
        out that the right-regular sequence is capacity-achieving in a much stronger
        sense than, e.g., the Tornado sequence. This also explains why numerical
        optimization techniques tend to favor graphs with only one degree of check nodes.
        Using our methods, we attack the problem of reducing the fraction of degree 2
        variable nodes, which has important practical implications. It turns out that one
        can produce capacity achieving sequences for which this fraction remains below
        any constant, albeit at the price of slower convergence to capacity}, 
  www_section = {coding theory, low-density parity-check}, 
  issn = {0018-9448}, 
  doi = {10.1109/TIT.2002.805067}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.83.6722}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.92.7281.pdf}, 
}
PIK
@conference{PIK,
  title = {Efficient anonymous channel and all/nothing election scheme}, 
  author = {Choonsik Park and Kazutomo Itoh and Kaoru Kurosawa}, 
  booktitle = {Proceedings of EUROCRYPT 1993}, 
  organization = {Springer-Verlag, LNCS 765}, 
  year = {1993}, 
  address = {Lofthus, Norway}, 
  pages = {248--259}, 
  publisher = {Springer-Verlag, LNCS 765}, 
  abstract = {The contribution of this paper are twofold. First, we present an efficient
        computationally secure anonymous channel which has no problem of ciphertext
        length expansion. The length is irrelevant to the number of MIXes (control
        centers). It improves the efficiency of Chaum's election scheme based on the MIX
        net automatically. Second, we show an election scheme which satisfies fairness.
        That is, if some vote is disrupted, no one obtains any information about all the
        other votes. Each voter sends O(nk) bits so that the probability of the fairness
        is 1-2^-k, where n is the bit length of the ciphertext}, 
  isbn = {3-540-57600-2}, 
  url = {http://portal.acm.org/citation.cfm?id=188307.188351}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mix.pdf}, 
  www_section = {Unsorted}, 
}
PShuffle
@conference{PShuffle,
  title = {An Efficient Scheme for Proving a Shuffle}, 
  author = {Jun Furukawa and Kazue Sako}, 
  booktitle = {Proceedings of {CRYPTO} 2001}, 
  organization = {Springer-Verlag, LNCS 2139}, 
  year = {2001}, 
  editor = {Joe Kilian}, 
  publisher = {Springer-Verlag, LNCS 2139}, 
  abstract = {In this paper, we propose a novel and efficient protocol for proving the
        correctness of a shuffle, without leaking how the shuffle was performed. Using
        this protocol, we can prove the correctness of a shuffle of n data with roughly
        18n exponentiations, whereas the protocol of Sako-Kilian [SK95] required 642n and
        that of Abe [Ab99] required 22n log n. The length of proof will be only 2^11 n
        bits in our protocol, opposed to 2^18 n bits and 2^14 n log n bits required by
        Sako-Kilian and Abe, respectively. The proposed protocol will be a building block
        of an efficient, universally verifiable mix-net, whose application to voting
        system is prominent}, 
  isbn = {978-3-540-42456-7}, 
  doi = {10.1007/3-540-44647-8}, 
  url = {http://portal.acm.org/citation.cfm?id=704279}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PShuffle.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Padmanabhan:2003:RPS:951950.952204
@conference{Padmanabhan:2003:RPS:951950.952204,
  title = {Resilient Peer-to-Peer Streaming}, 
  author = {Padmanabhan, Venkata N. and Wang, Helen J. and Chou, Philip A.}, 
  booktitle = {ICNP'03. Proceedings of the 11th IEEE International Conference on Network
        Protocols}, 
  organization = {IEEE Computer Society}, 
  year = {2003}, 
  month = {November}, 
  address = {Atlanta, Georgia, USA}, 
  pages = {0--16}, 
  publisher = {IEEE Computer Society}, 
  series = {ICNP '03}, 
  abstract = {We consider the problem of distributing "live" streaming media content to a
        potentially large and highly dynamic population of hosts. Peer-to-peer content
        distribution is attractive in this setting because the bandwidth available to
        serve content scales with demand. A key challenge, however, is making content
        distribution robust to peer transience. Our approach to providing robustness is
        to introduce redundancy, both in network paths and in data. We use multiple,
        diverse distribution trees to provide redundancy in network paths and multiple
        description coding (MDC) to provide redundancy in data. We present a simple tree
        management algorithm that provides the necessary path diversity and describe an
        adaptation framework for MDC based on scalable receiver feedback. We evaluate
        these using MDC applied to real video data coupled with real usage traces from a
        major news site that experienced a large flash crowd for live streaming content.
        Our results show very significant benefits in using multiple distribution trees
        and MDC, with a 22 dB improvement in PSNR in some cases}, 
  www_section = {distribution trees, mdc, media content, multiple description coding,
        peer-to-peer streaming}, 
  isbn = {0-7695-2024-3}, 
  url = {http://dl.acm.org/citation.cfm?id=951950.952204}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICNP\%2703\%20-\%20Resilient\%20peer-to-peer\%20streaming.pdf},
}
Pai06improvingrobustness
@conference{Pai06improvingrobustness,
  title = {Improving Robustness of Peer-to-Peer Streaming with Incentives}, 
  author = {Vinay Pai and Alexander E. Mohr}, 
  booktitle = {NetEcon'06. 1st Workshop on the Economics of Networked Systems}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {June}, 
  address = {Ann Arbor, Michigan, USA}, 
  publisher = {ACM}, 
  abstract = {In this paper we argue that a robust incentive mechanism is important in a
        real-world peer-to-peer streaming system to ensure that nodes contribute as much
        upload bandwidth as they can. We show that simple tit-for-tat mechanisms which
        work well in file-sharing systems like BitTorrent do not perform well given the
        additional delay and bandwidth constraints imposed by live streaming. We present
        preliminary experimental results for an incentive mechanism based on the Iterated
        Prisoner's Dilemma problem that allows all nodes to download with low packet loss
        when there is sufficient capacity in the system, but when the system is
        resource-starved, nodes that contribute upload bandwidth receive better service
        than those that do not. Moreover, our algorithm does not require nodes to rely on
        any information other than direct observations of its neighbors' behavior
        towards it}, 
  www_section = {peer-to-peer streaming, tit-for-tat}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2706\%20-\%20Improving\%20robustness\%20of\%20p2p\%20streaming.pdf},
}
Papadopouli00sevendegrees
@conference{Papadopouli00sevendegrees,
  title = {Seven Degrees of Separation in Mobile Ad Hoc Networks}, 
  author = {Maria Papadopouli and Henning G. Schulzrinne}, 
  booktitle = {IEEE GLOBECOM}, 
  year = {2000}, 
  pages = {1707--1711}, 
  abstract = {We present an architecture that enables the sharing of information among
        mobile, wireless, collaborating hosts that experience intermittent connectivity
        to the Internet. Participants in the system obtain data objects from
        Internet-connected servers, cache them and exchange them with others who are
        interested in them. The system exploits the fact that there is a high locality of
        information access within a geographic area. It aims to increase the data
        availability to participants with lost connectivity to the Internet. We discuss
        the main components of the system and possible applications. Finally, we present
        simulation results that show that the ad hoc networks can be very effective in
        distributing popular information. In a few years, a large
        percentage of the population in metropolitan areas will be equipped with PDAs,
        laptops or cell phones with built-in web browsers. Thus, access to information
        and entertainment will become as important as voice communications}, 
  www_section = {802.11, file-sharing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.36.5640}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/globecom00.pdf}, 
}
Pei00fisheyestate
@conference{Pei00fisheyestate,
  title = {Fisheye State Routing in Mobile Ad Hoc Networks}, 
  author = {Guangyu Pei and Mario Gerla and Tsu-Wei Chen}, 
  booktitle = {ICDCS Workshop on Wireless Networks and Mobile Computing}, 
  year = {2000}, 
  pages = {71--78}, 
  abstract = {In this paper, we present a novel routing protocol for wireless ad hoc
        networks -- Fisheye State Routing (FSR). FSR introduces the notion of multi-level
        fisheye scope to reduce routing update overhead in large networks. Nodes exchange
        link state entries with their neighbors with a frequency which depends on
        distance to destination. From link state entries, nodes construct the topology
        map of the entire network and compute optimal routes. Simulation experiments show
        that FSR is simple, efficient and scalable routing solution in a mobile, ad hoc
        environment. As the wireless and embedded computing technologies
        continue to advance, increasing numbers of small size and high performance
        computing and communication devices will be capable of tetherless communications
        and ad hoc wireless networking. An ad hoc wireless network is a self-organizing
        and self-configuring network with the capability of rapid deployment in response
        to application needs}, 
  www_section = {mobile Ad-hoc networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.43.6730}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/05_75_fisheye-state-routing-in_0.pdf},
}
Perng04providingcontent-based
@conference{Perng04providingcontent-based,
  title = {Providing content-based services in a peer-to-peer environment}, 
  author = {Ginger Perng and Chenxi Wang and Michael K. Reiter}, 
  booktitle = {Proceedings of the Third International Workshop on Distributed
        Event-Based Systems (DEBS)}, 
  year = {2004}, 
  pages = {74--79}, 
  abstract = {Information dissemination in wide area networks has recently garnered much
        attention. Two differing models, publish/subscribe and rendezvous-based multicast
        atop overlay networks, have emerged as the two leading approaches for this goal.
        Event-based publish/subscribe supports contentbased services with powerful
        filtering capabilities, while peer-to-peer rendezvous-based services allow for
        efficient communication in a dynamic network infrastructure. We describe Reach, a
        system that integrates these two approaches to provide efficient and scalable
        content-based services in a dynamic network setting}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.4393\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/debs04perng.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Petcu:2007:PNP:1625275.1625301
@conference{Petcu:2007:PNP:1625275.1625301,
  title = {PC-DPOP: a new partial centralization algorithm for distributed optimization}, 
  author = {Adrian Petcu and Boi Faltings and Mailler, Roger}, 
  booktitle = {IJCAI'07--Proceedings of the 20th international joint conference on
        Artificial Intelligence}, 
  organization = {Morgan Kaufmann Publishers Inc}, 
  year = {2007}, 
  month = {January}, 
  address = {Hyderabad, India}, 
  pages = {167--172}, 
  publisher = {Morgan Kaufmann Publishers Inc}, 
  series = {IJCAI'07}, 
  abstract = {Fully decentralized algorithms for distributed constraint optimization often
        require excessive amounts of communication when applied to complex problems. The
        OptAPO algorithm of [Mailler and Lesser, 2004] uses a strategy of partial
        centralization to mitigate this problem. We introduce PC-DPOP, a new partial
        centralization technique, based on the DPOP algorithm of [Petcu and Faltings,
        2005]. PC-DPOP provides better control over what parts of the problem are
        centralized and allows this centralization to be optimal with respect to the
        chosen communication structure. Unlike OptAPO, PC-DPOP allows for a priori, exact
        predictions about privacy loss, communication, memory and computational
        requirements on all nodes and links in the network. Upper bounds on communication
        and memory requirements can be specified. We also report strong efficiency gains
        over OptAPO in experiments on three problem domains}, 
  www_section = {algorithms, distributed constraint optimization, DPOP, OptAPO, partial
        centralization technique}, 
  url = {http://dl.acm.org/citation.cfm?id=1625275.1625301}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IJCAI\%2707\%20-\%20PC-DPOP.pdf},
}
Peterson03ext3cow:the
@booklet{Peterson03ext3cow:the,
  title       = {Ext3cow: The Design, Implementation, and Analysis of Metadata for a
                 Time-Shifting File System},
  author      = {Zachary N. J. Peterson and Randal C. Burns},
  year        = {2003},
  abstract    = {The ext3cow file system, built on Linux's popular ext3 file system, brings
                 snapshot functionality and file versioning to the open-source community. Our
                 implementation of ext3cow has several desirable properties: ext3cow is
                 implemented entirely in the file system and, therefore, does not modify kernel
                 interfaces or change the operation of other file systems; ext3cow provides a
                 time-shifting interface that permits access to data in the past without polluting
                 the file system namespace; and, ext3cow creates versions of files on disk without
                 copying data in memory. Experimental results show that the time-shifting
                 functions of ext3cow do not degrade file system performance. Ext3cow performs
                 comparably to ext3 on many file system benchmarks and trace driven experiments},
  www_section = {file systems},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.2545},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.2545.pdf},
}
Pfister05capacity-achievingensembles
@article{Pfister05capacity-achievingensembles,
  title = {Capacity-achieving ensembles for the binary erasure channel with bounded
        complexity}, 
  author = {Henry D. Pfister and Igal Sason and R{\"u}diger L. Urbanke}, 
  journal = {IEEE Transactions on Information Theory}, 
  volume = {51}, 
  number = {7}, 
  year = {2005}, 
  pages = {2352--2379}, 
  abstract = {We present two sequences of ensembles of nonsystematic irregular
        repeat--accumulate (IRA) codes which asymptotically (as their block length tends
        to infinity) achieve capacity on the binary erasure channel (BEC) with bounded
        complexity per information bit. This is in contrast to all previous constructions
        of capacity-achieving sequences of ensembles whose complexity grows at least like
        the log of the inverse of the gap (in rate) to capacity. The new bounded
        complexity result is achieved by puncturing bits, and allowing in this way a
        sufficient number of state nodes in the Tanner graph representing the codes. We
        derive an information-theoretic lower bound on the decoding complexity of
        randomly punctured codes on graphs. The bound holds for every memoryless
        binary-input output-symmetric (MBIOS) channel and is refined for the binary
        erasure channel}, 
  www_section = {BEC, coding theory, IRA, MBIOS}, 
  isbn = {0-7803-8280-3}, 
  doi = {10.1109/ISIT.2004.1365246}, 
  internal-note = {NOTE(review): isbn and doi appear to reference the ISIT 2004
        conference version rather than this 2005 journal article -- verify}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.90.3798}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0409026v1.pdf}, 
}
Piatek:2007:IBR:1973430.1973431
@conference{Piatek:2007:IBR:1973430.1973431,
  title = {Do incentives build robustness in BitTorrent?}, 
  author = {Piatek, Michael and Isdal, Tomas and Anderson, Thomas and Krishnamurthy, Arvind
        and Venkataramani, Arun}, 
  booktitle = {NSDI'07. Proceedings of the 4th USENIX Conference on Networked Systems
        Design Implementation}, 
  organization = {USENIX Association}, 
  year = {2007}, 
  month = {April}, 
  address = {Cambridge, MA, USA}, 
  pages = {1--1}, 
  publisher = {USENIX Association}, 
  series = {NSDI'07}, 
  abstract = {A fundamental problem with many peer-to-peer systems is the tendency for
        users to "free ride"--to consume resources without contributing to the system.
        The popular file distribution tool BitTorrent was explicitly designed to address
        this problem, using a tit-for-tat reciprocity strategy to provide positive
        incentives for nodes to contribute resources to the swarm. While BitTorrent has
        been extremely successful, we show that its incentive mechanism is not robust to
        strategic clients. Through performance modeling parameterized by real world
        traces, we demonstrate that all peers contribute resources that do not directly
        improve their performance. We use these results to drive the design and
        implementation of BitTyrant, a strategic BitTorrent client that provides a median
        70\% performance gain for a 1 Mbit client on live Internet swarms. We further
        show that when applied universally, strategic clients can hurt average per-swarm
        performance compared to today's BitTorrent client implementations}, 
  url = {http://dl.acm.org/citation.cfm?id=1973430.1973431}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2707\%20-\%20Do\%20incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
  internal-note = {NOTE(review): apparent duplicate of entry
        Piatek:2007:IBR:1973430.1973431_0, which carries the www_section this copy is
        missing -- consider merging}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Piatek:2007:IBR:1973430.1973431_0
@conference{Piatek:2007:IBR:1973430.1973431_0,
  title = {Do incentives build robustness in BitTorrent?}, 
  author = {Piatek, Michael and Isdal, Tomas and Anderson, Thomas and Krishnamurthy, Arvind
        and Venkataramani, Arun}, 
  booktitle = {NSDI'07. Proceedings of the 4th USENIX Conference on Networked Systems
        Design Implementation}, 
  organization = {USENIX Association}, 
  year = {2007}, 
  month = {April}, 
  address = {Cambridge, MA, USA}, 
  pages = {1--1}, 
  publisher = {USENIX Association}, 
  series = {NSDI'07}, 
  abstract = {A fundamental problem with many peer-to-peer systems is the tendency for
        users to "free ride"--to consume resources without contributing to the system.
        The popular file distribution tool BitTorrent was explicitly designed to address
        this problem, using a tit-for-tat reciprocity strategy to provide positive
        incentives for nodes to contribute resources to the swarm. While BitTorrent has
        been extremely successful, we show that its incentive mechanism is not robust to
        strategic clients. Through performance modeling parameterized by real world
        traces, we demonstrate that all peers contribute resources that do not directly
        improve their performance. We use these results to drive the design and
        implementation of BitTyrant, a strategic BitTorrent client that provides a median
        70\% performance gain for a 1 Mbit client on live Internet swarms. We further
        show that when applied universally, strategic clients can hurt average per-swarm
        performance compared to today's BitTorrent client implementations}, 
  www_section = {BitTorrent, free riding, incentives, peer-to-peer networking}, 
  url = {http://dl.acm.org/citation.cfm?id=1973430.1973431}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NSDI\%2707\%20-\%20Do\%20incentives\%20build\%20robustness\%20in\%20BitTorrent.pdf},
  internal-note = {NOTE(review): duplicate of entry Piatek:2007:IBR:1973430.1973431
        (identical fields except www_section); prefer this complete copy if merging}, 
}
Plank:2009:PEE:1525908.1525927
@conference{Plank:2009:PEE:1525908.1525927,
  title = {A performance evaluation and examination of open-source erasure coding libraries
        for storage}, 
  author = {James S. Plank and Luo, Jianqiang and Schuman, Catherine D. and Lihao Xu and
        Wilcox-O'Hearn, Zooko}, 
  booktitle = {FAST'09--Proceedings of the 7th Conference on File and Storage
        Technologies}, 
  organization = {USENIX Association}, 
  year = {2009}, 
  month = {February}, 
  address = {San Francisco, CA, USA}, 
  pages = {253--265}, 
  publisher = {USENIX Association}, 
  abstract = {Over the past five years, large-scale storage installations have required
        fault-protection beyond RAID-5, leading to a flurry of research on and
        development of erasure codes for multiple disk failures. Numerous open-source
        implementations of various coding techniques are available to the general public.
        In this paper, we perform a head-to-head comparison of these implementations in
        encoding and decoding scenarios. Our goals are to compare codes and
        implementations, to discern whether theory matches practice, and to demonstrate
        how parameter selection, especially as it concerns memory, has a significant
        impact on a code's performance. Additional benefits are to give storage system
        designers an idea of what to expect in terms of coding performance when designing
        their storage systems, and to identify the places where further erasure coding
        research can have the most impact}, 
  www_section = {erasure coding, libraries, open-source, storage}, 
  url = {http://www.usenix.org/event/fast09/tech/full_papers/plank/plank_html/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FAST\%2709\%20-\%20Open-source\%20erasure\%20coding\%20libraries\%20for\%20storage.pdf},
}
Pouwelse05thebittorrent
@conference{Pouwelse05thebittorrent,
  title = {The BitTorrent P2P File-sharing System: Measurements and Analysis}, 
  author = {Johan Pouwelse and Garbacki, Pawel and Epema, Dick H. J. and Henk J. Sips}, 
  booktitle = {IPTPS'05. Proceedings of the 4th International Workshop on Peer-To-Peer
        Systems}, 
  organization = {Springer}, 
  volume = {3640}, 
  year = {2005}, 
  month = {February}, 
  address = {Ithaca, NY, USA}, 
  pages = {205--216}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Of the many P2P file-sharing prototypes in existence, BitTorrent is one of
        the few that has managed to attract millions of users. BitTorrent relies on other
        (global) components for file search, employs a moderator system to ensure the
        integrity of file data, and uses a bartering technique for downloading in order
        to prevent users from freeriding. In this paper we present a measurement study of
        BitTorrent in which we focus on four issues, viz. availability, integrity,
        flashcrowd handling, and download performance. The purpose of this paper is to
        aid in the understanding of a real P2P system that apparently has the right
        mechanisms to attract a large user community, to provide measurement data that
        may be useful in modeling P2P systems, and to identify design issues in such
        systems}, 
  www_section = {BitTorrent, file-sharing}, 
  doi = {10.1007/11558989_19}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2705\%20-\%20The\%20BitTorrent\%3A\%20measurements\%20and\%20analysis.pdf},
}
Pouwelse:2008:TSP:1331115.1331119
@article{Pouwelse:2008:TSP:1331115.1331119,
  title = {TRIBLER: a Social-based Peer-to-Peer System}, 
  author = {Johan Pouwelse and Garbacki, Pawel and Jun Wang and Arno Bakker and Jie Yang
        and Alexandru Iosup and Epema, Dick H. J. and Marcel J. T. Reinders and van
        Steen, Maarten and Henk J. Sips}, 
  journal = {Concurrency and Computation: Practice \& Experience}, 
  volume = {20}, 
  year = {2008}, 
  month = {February}, 
  address = {Chichester, UK}, 
  pages = {127--138}, 
  publisher = {John Wiley and Sons Ltd}, 
  abstract = {Most current peer-to-peer (P2P) file-sharing systems treat their users as
        anonymous, unrelated entities, and completely disregard any social relationships
        between them. However, social phenomena such as friendship and the existence of
        communities of users with similar tastes or interests may well be exploited in
        such systems in order to increase their usability and performance. In this paper
        we present a novel social-based P2P file-sharing paradigm that exploits social
        phenomena by maintaining social networks and using these in content discovery,
        content recommendation, and downloading. Based on this paradigm's main concepts
        such as taste buddies and friends, we have designed and implemented the TRIBLER
        P2P file-sharing system as a set of extensions to BitTorrent. We present and
        discuss the design of TRIBLER, and we show evidence that TRIBLER enables fast
        content discovery and recommendation at a low additional overhead, and a
        significant improvement in download performance. Copyright {\textcopyright} 2007
        John Wiley \& Sons, Ltd}, 
  www_section = {peer-to-peer networking, social-based, taste buddies}, 
  issn = {1532-0626}, 
  doi = {10.1002/cpe.v20:2}, 
  url = {http://dl.acm.org/citation.cfm?id=1331115.1331119}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Concurrency\%20and\%20Computation\%20-\%20TRIBLER.pdf},
}
Prabhakar01buildinglow-diameter
@article{Prabhakar01buildinglow-diameter,
  title = {Building Low-Diameter P2P Networks}, 
  author = {Gopal Pandurangan and Prabhakar Raghavan and Eli Upfal}, 
  journal = {IEEE Journal on Selected Areas in Communications}, 
  volume = {21}, 
  year = {2003}, 
  month = {August}, 
  pages = {995--1002}, 
  abstract = {Scheme to build dynamic, distributed P2P networks of constant degree and
        logarithmic diameter}, 
  url = {http://www.cs.brown.edu/people/eli/papers/focs01.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/focs01.pdf}, 
  internal-note = {NOTE(review): key and url point to the FOCS'01 version while
        journal/volume/year cite the 2003 IEEE JSAC version; key also names the second
        author (Prabhakar Raghavan), not the first -- verify before citing}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Prusty:2011:FIO:2046707.2046731
@conference{Prusty:2011:FIO:2046707.2046731,
  title = {Forensic investigation of the OneSwarm anonymous filesharing system}, 
  author = {Prusty, Swagatika and Brian Neil Levine and Marc Liberatore}, 
  booktitle = {Proceedings of the 18th ACM conference on Computer and communications
        security}, 
  organization = {ACM}, 
  year = {2011}, 
  address = {New York, NY, USA}, 
  pages = {201--214}, 
  publisher = {ACM}, 
  series = {CCS '11}, 
  abstract = {OneSwarm is a system for anonymous p2p file sharing in use by thousands of
        peers. It aims to provide Onion Routing-like privacy and BitTorrent-like
        performance. We demonstrate several flaws in OneSwarm's design and implementation
        through three different attacks available to forensic investigators. First, we
        prove that the current design is vulnerable to a novel timing attack that allows
        just two attackers attached to the same target to determine if it is the source
        of queried content. When attackers comprise 15\% of OneSwarm peers, we expect
        over 90\% of remaining peers will be attached to two attackers and therefore
        vulnerable. Thwarting the attack increases OneSwarm query response times, making
        them longer than the equivalent in Onion Routing. Second, we show that OneSwarm's
        vulnerability to traffic analysis by colluding attackers is much greater than was
        previously reported, and is much worse than Onion Routing. We show for this
        second attack that when investigators comprise 25\% of peers, over 40\% of the
        network can be investigated with 80\% precision to find the sources of content.
        Our examination of the OneSwarm source code found differences with the technical
        paper that significantly reduce security. For the implementation in use by
        thousands of people, attackers that comprise 25\% of the network can successfully
        use this second attack against 98\% of remaining peers with 95\% precision.
        Finally, we show that a novel application of a known TCP-based attack allows a
        single attacker to identify whether a neighbor is the source of data or a proxy
        for it. Users that turn off the default rate-limit setting are exposed. Each
        attack can be repeated as investigators leave and rejoin the network. All of our
        attacks are successful in a forensics context: Law enforcement can use them
        legally ahead of a warrant. Furthermore, private investigators, who have fewer
        restrictions on their behavior, can use them more easily in pursuit of evidence
        for such civil suits as copyright infringement}, 
  www_section = {anonymity, OneSwarm, p2p network}, 
  isbn = {978-1-4503-0948-6}, 
  doi = {10.1145/2046707.2046731}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/prusty.ccs_.2011.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
Qiu:2003:SRI:863955.863974
@conference{Qiu:2003:SRI:863955.863974,
  title = {On selfish routing in internet-like environments}, 
  author = {Lili Qiu and Yang, Yang Richard and Zhang, Yin and S Shenker}, 
  booktitle = {SIGCOMM'03. Proceedings of the 2003 conference on Applications,
        technologies, architectures, and protocols for computer communications}, 
  organization = {ACM}, 
  year = {2003}, 
  month = {August}, 
  address = {Karlsruhe, Germany}, 
  pages = {151--162}, 
  publisher = {ACM}, 
  series = {SIGCOMM '03}, 
  abstract = {A recent trend in routing research is to avoid inefficiencies in
        network-level routing by allowing hosts to either choose routes themselves (e.g.,
        source routing) or use overlay routing networks (e.g., Detour or RON). Such
        approaches result in selfish routing, because routing decisions are no longer
        based on system-wide criteria but are instead designed to optimize host-based or
        overlay-based metrics. A series of theoretical results showing that selfish
        routing can result in suboptimal system behavior have cast doubts on this
        approach. In this paper, we use a game-theoretic approach to investigate the
        performance of selfish routing in Internet-like environments. We focus on
        intra-domain network environments and use realistic topologies and traffic
        demands in our simulations. We show that in contrast to theoretical worst cases,
        selfish routing achieves close to optimal average latency in such environments.
        However, such performance benefit comes at the expense of significantly increased
        congestion on certain links. Moreover, the adaptive nature of selfish overlays
        can significantly reduce the effectiveness of traffic engineering by making
        network traffic less predictable}, 
  www_section = {game theory, optimization, overlay, relaxation, selfish routing, traffic
        engineering, traffic equilibrium}, 
  isbn = {1-58113-735-4}, 
  doi = {10.1145/863955.863974}, 
  url = {http://doi.acm.org/10.1145/863955.863974}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2703\%20-\%20On\%20selfish\%20routing\%20in\%20internet-like\%20environments.pdf},
}
Qiu:2004:MPA:1015467.1015508
@conference{Qiu:2004:MPA:1015467.1015508,
  title = {Modeling and performance analysis of BitTorrent-like peer-to-peer networks}, 
  author = {Qiu, Dongyu and Rayadurgam Srikant}, 
  booktitle = {SIGCOMM'04. Proceedings of the 2004 Conference on Applications,
        Technologies, Architectures, and Protocols for Computer Communications}, 
  organization = {ACM}, 
  year = {2004}, 
  month = {August}, 
  address = {Portland, Oregon, USA}, 
  pages = {367--378}, 
  publisher = {ACM}, 
  series = {SIGCOMM '04}, 
  abstract = {In this paper, we develop simple models to study the performance of
        BitTorrent, a second generation peer-to-peer (P2P) application. We first present
        a simple fluid model and study the scalability, performance and efficiency of
        such a file-sharing mechanism. We then consider the built-in incentive mechanism
        of BitTorrent and study its effect on network performance. We also provide
        numerical results based on both simulations and real traces obtained from the
        Internet}, 
  www_section = {fluid model, game theory, peer-to-peer networking}, 
  isbn = {1-58113-862-8}, 
  doi = {10.1145/1015467.1015508}, 
  url = {http://doi.acm.org/10.1145/1015467.1015508}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2704\%20-\%20Qui\%20\%26\%20Srikant\%20-\%20Modeling\%20and\%20performance\%20analysis.pdf},
}
R5N
@conference{R5N,
  title = {R5N: Randomized Recursive Routing for Restricted-Route Networks}, 
  author = {Nathan S Evans and Christian Grothoff}, 
  booktitle = {5th International Conference on Network and System Security (NSS 2011)}, 
  organization = {IEEE}, 
  year = {2011}, 
  month = {September}, 
  address = {Milan, Italy}, 
  publisher = {IEEE}, 
  abstract = {This paper describes a new secure DHT routing algorithm for open,
        decentralized P2P networks operating in a restricted-route environment with
        malicious participants. We have implemented our routing algorithm and have
        evaluated its performance under various topologies and in the presence of
        malicious peers. For small-world topologies, our algorithm provides significantly
        better performance when compared to existing methods. In more densely connected
        topologies, our performance is better than or on par with other designs}, 
  www_section = {distributed hash table, GNUnet, R5N, routing}, 
  www_tags = {selected}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nss2011.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
RP03-1
@conference{RP03-1,
  title = {Practical Anonymity for the Masses with Mix-Networks}, 
  author = {Rennhard, Marc and Plattner, Bernhard}, 
  booktitle = {Proceedings of the IEEE 8th Intl. Workshop on Enterprise Security (WET ICE
        2003)}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2003}, 
  month = {June}, 
  address = {Linz, Austria}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {Designing mix-networks for low-latency applications that offer acceptable
        performance and provide good resistance against attacks without introducing too
        much over-head is very difficult. Good performance and small over-heads are vital
        to attract users and to be able to support many of them, because with only a few
        users, there is no anonymity at all. In this paper, we analyze how well
        different kinds of mix-networks are suited to provide practical anonymity for a
        very large number of users}, 
  www_section = {performance}, 
  isbn = {0-7695-1963-6}, 
  url = {http://portal.acm.org/citation.cfm?id=938984.939808}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RP03-1.pdf}, 
}
RRMPH02-1
@conference{RRMPH02-1,
  title = {Analysis of an Anonymity Network for Web Browsing}, 
  author = {Rennhard, Marc and Rafaeli, Sandro and Mathy, Laurent and Plattner, Bernhard
        and Hutchison, David}, 
  booktitle = {Proceedings of the IEEE 7th Intl. Workshop on Enterprise Security (WET ICE
        2002)}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2002}, 
  month = {June}, 
  address = {Pittsburgh, USA}, 
  pages = {49--54}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {Various systems offering anonymity for near real-time Internet traffic have
        been operational. However, they did not deliver many quantitative results about
        performance, bandwidth overhead, or other issues that arise when implementing or
        operating such a system. Consequently, the problem of designing and operating
        these systems in a way that they provide a good balance between usability,
        protection from attacks, and overhead is not well understood. In this paper, we
        present the analysis of an anonymity network for web browsing that offers a high
        level of anonymity against a sophisticated attacker and good end-to-end
        performance at a reasonable bandwidth overhead. We describe a novel way of
        operating the system that maximizes the protection from traffic analysis attacks
        while minimizing the bandwidth overhead. We deliver quantitative results about
        the performance of our system, which should help to give a better understanding
        of anonymity networks}, 
  www_section = {anonymity, anonymous web browsing}, 
  isbn = {0-7695-1748-X}, 
  url = {http://portal.acm.org/citation.cfm?id=759973}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/RRMPH02-1.pdf}, 
}
Ramanathan95thefinal
@booklet{Ramanathan95thefinal,
  title = {The final frontier: Embedding networked sensors in the soil}, 
  author = {Ramanathan, Nithya and Schoellhammer, Tom and Estrin, Deborah and Hansen, Mark
        and Harmon, Tom and Kohler, Eddie and Srivastava, Mani}, 
  year = {1995}, 
  publisher = {Lecture Notes in Computer Science}, 
  internal-note = {NOTE(review): year=1995 looks wrong for the deployments described
        (Mica2 motes postdate 1995) and "Lecture Notes in Computer Science" is a series,
        not a publisher -- verify against the original report}, 
  abstract = {This paper presents the first systematic design of a robust sensing system
        suited for the challenges presented by soil environments. We describe three soil
        deployments we have undertaken: in Bangladesh, and in California at the James
        Reserve and in the San Joaquin River basin. We discuss our experiences and
        lessons learned in deploying soil sensors. We present data from each deployment
        and evaluate our techniques for improving the information yield from these
        systems. Our most notable results include the following: in-situ calibration
        techniques to postpone labor-intensive and soil disruptive calibration events
        developed at the James Reserve; achieving a 91 \% network yield from a Mica2
        wireless sensing system without end-to-end reliability in Bangladesh; and the
        javelin, a new platform that facilitates the deployment, replacement and in-situ
        calibration of soil sensors, deployed in the San Joaquin River basin. Our
        techniques to increase information yield have already led to scientifically
        promising results, including previously unexpected diurnal cycles in various soil
        chemistry parameters across several deployments}, 
  www_section = {sensor networks, wireless sensor network}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.120.7766}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.120.7766.pdf}, 
}
RatnasamyHellersteinShenker2003RangeQueries
@booklet{RatnasamyHellersteinShenker2003RangeQueries,
  title = {Range Queries over DHTs}, 
  author = {Ratnasamy, Sylvia and Hellerstein, Joseph M. and Shenker, Scott}, 
  year = {2003}, 
  abstract = {Distributed Hash Tables (DHTs) are scalable peer-to-peer systems that support
        exact match lookups. This paper describes the construction and use of a Prefix
        Hash Tree (PHT) -- a distributed data structure that supports range queries over
        DHTs. PHTs use the hash-table interface of DHTs to construct a search tree that
        is efficient (insertions/lookups take \#\#\#\#\# \#\#\# \#\#\#\# DHT lookups,
        where D is the data domain being indexed) and robust (the failure of any given
        node in the search tree does not affect the availability of data stored at other
        nodes in the PHT)}, 
  www_section = {distributed hash table, P2P, queries, range}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.6.243}, 
}
RePEc:bla:restud:v:66:y:1999:i:1:p:3-21
@article{RePEc:bla:restud:v:66:y:1999:i:1:p:3-21,
  author = {Mirrlees, James A.}, 
  title = {The Theory of Moral Hazard and Unobservable Behaviour: Part I}, 
  journal = {Review of Economic Studies}, 
  volume = {66}, 
  number = {1}, 
  pages = {3--21}, 
  month = {January}, 
  year = {1999}, 
  abstract = {This article presents information on principal-agent models in which outcomes
        conditional on the agent's action are uncertain, and the agent's behavior
        therefore unobservable. For a model with bounded agent's utility, conditions are
        given under which the first-best equilibrium can be approximated arbitrarily
        closely by contracts relating payment to observable outcomes. For general models,
        it is shown that the solution may not always be obtained by using the agent's
        first-order conditions as constraint. General conditions of Lagrangean type are
        given for problems in which contracts are finite-dimensional}, 
  www_section = {contracts, Lagrangean conditions, unobservability}, 
  url = {http://econpapers.repec.org/RePEc:bla:restud:v:66:y:1999:i:1:p:3-21}, 
}
Reed98anonymousconnections
@article{Reed98anonymousconnections,
  title = {Anonymous Connections and Onion Routing}, 
  author = {Reed, Michael and Syverson, Paul and Goldschlag, David}, 
  journal = {IEEE Journal on Selected Areas in Communications}, 
  volume = {16}, 
  year = {1998}, 
  pages = {482--494}, 
  abstract = {Onion Routing is an infrastructure for private communication over a public
        network. It provides anonymous connections that are strongly resistant to both
        eavesdropping and traffic analysis. Onion routing's anonymous connections are
        bidirectional and near realtime, and can be used anywhere a socket connection can
        be used. Any identifying information must be in the data stream carried over an
        anonymous connection. An onion is a data structure that is treated as the
        destination address by onion routers; thus, it is used to establish an anonymous
        connection. Onions themselves appear differently to each onion router as well as
        to network observers. The same goes for data carried over the connections they
        establish. Proxy aware applications, such as web browsing and e-mail, require no
        modification to use onion routing, and do so through a series of proxies. A
        prototype onion routing network is running between our lab and other sites. This
        paper describes anonymous connections and their implementation}, 
  www_section = {anonymity, onion routing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.35.2362}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.8267.pdf}, 
}
Reiter98crowds:anonymity
@article{Reiter98crowds:anonymity,
  author = {Michael K. Reiter and Aviel D. Rubin}, 
  title = {Crowds: Anonymity for web transactions}, 
  journal = {ACM Transactions on Information and System Security}, 
  volume = {1}, 
  pages = {66--92}, 
  year = {1998}, 
  abstract = {Crowds is a system that allows anonymous web-surfing. For each host, a random
        static path through the crowd is formed that then acts as a sequence of proxies,
        indirecting replies and responses. Vulnerable when facing adversaries that can
        perform traffic analysis at the local node and without responder anonymity. But
        highly scalable and efficient}, 
  www_section = {anonymous web browsing, Crowds}, 
  url = {http://avirubin.com/crowds.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/crowds.pdf}, 
}
Resnick:2000:RS:355112.355122
@article{Resnick:2000:RS:355112.355122,
  title = {Reputation systems}, 
  author = {Resnick, Paul and Kuwabara, Ko and Zeckhauser, Richard and Friedman, Eric}, 
  journal = {Communications of the ACM}, 
  volume = {43}, 
  year = {2000}, 
  month = {December}, 
  address = {New York, NY, USA}, 
  pages = {45--48}, 
  publisher = {ACM}, 
  www_section = {reputation systems}, 
  issn = {0001-0782}, 
  doi = {10.1145/355112.355122}, 
  url = {http://doi.acm.org/10.1145/355112.355122}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Communications\%20of\%20the\%20ACM\%20-\%20Reputation\%20Systems.pdf},
}
Resnick:2009:STT:1566374.1566423
@conference{Resnick:2009:STT:1566374.1566423,
  title = {Sybilproof Transitive Trust Protocols}, 
  author = {Resnick, Paul and Sami, Rahul}, 
  booktitle = {EC'09. Proceedings of the 10th ACM Conference on Electronic commerce}, 
  organization = {ACM}, 
  year = {2009}, 
  month = {July}, 
  address = {Stanford, California, USA}, 
  pages = {345--354}, 
  publisher = {ACM}, 
  series = {EC '09}, 
  abstract = {We study protocols to enable one user (the principal) to make potentially
        profitable but risky interactions with another user (the agent), in the absence
        of direct trust between the two parties. In such situations, it is possible to
        enable the interaction indirectly through a chain of credit or "trust" links. We
        introduce a model that provides insight into many disparate applications,
        including open currency systems, network trust aggregation systems, and
        manipulation-resistant recommender systems. Each party maintains a trust account
        for each other party. When a principal's trust balance for an agent is high
        enough to cover potential losses from a bad interaction, direct trust is
        sufficient to enable the interaction. Allowing indirect trust opens up more
        interaction opportunities, but also expands the strategy space of an attacker
        seeking to exploit the community for its own ends. We show that with indirect
        trust exchange protocols, some friction is unavoidable: any protocol that
        satisfies a natural strategic safety property that we call sum-sybilproofness can
        sometimes lead to a reduction in expected overall trust balances even on
        interactions that are profitable in expectation. Thus, for long-term growth of
        trust accounts, which are assets enabling risky but valuable interactions, it may
        be necessary to limit the use of indirect trust. We present the hedged-transitive
        protocol and show that it achieves the optimal rate of expected growth in trust
        accounts, among all protocols satisfying the sum-sybilproofness condition}, 
  www_section = {indirect reciprocity, open currency, recommender system, reputation
        system, sybilproof, transitive trust}, 
  isbn = {978-1-60558-458-4}, 
  doi = {10.1145/1566374.1566423}, 
  url = {http://doi.acm.org/10.1145/1566374.1566423}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EC\%2709\%20-\%20Sybilproof\%20transitive\%20trust\%20protocols.pdf},
}
Rivest00trusteconomies
@booklet{Rivest00trusteconomies,
  title = {Trust Economies in The Free Haven Project}, 
  author = {Rivest, Ron and Smith, Arthur C. and Sniffen, Brian T.}, 
  year = {2000}, 
  abstract = {The Free Haven Project aims to deploy a system for distributed data storage
        which is robust against attempts by powerful adversaries to find and destroy
        stored data. Free Haven uses a secure mixnet for communication, and it emphasizes
        distributed, reliable, and anonymous storage over efficient retrieval. We
        provide a system for building trust between pseudonymous entities, based entirely
        on records of observed behavior. Modelling these observed behaviors as an economy
        allows us to draw heavily on previous economic theory, as well as on existing
        data havens which base their accountability on financial loss. This trust system
        provides a means of enforcing accountability without sacrificing anonymity}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.27.1639\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.27.1639\%20\%282\%29.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Rowstron01pastry:scalable
@conference{Rowstron01pastry:scalable,
  title = {Pastry: Scalable, decentralized object location and routing for large-scale
        peer-to-peer systems}, 
  author = {Rowstron, Antony and Druschel, Peter}, 
  booktitle = {Middleware'01--Proceedings of the IFIP/ACM International Conference on
        Distributed Systems Platforms}, 
  organization = {Springer-Verlag}, 
  volume = {2218}, 
  year = {2001}, 
  month = {November}, 
  address = {Heidelberg, Germany}, 
  pages = {329--350}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This paper presents the design and evaluation of Pastry, a scalable,
        distributed object location and routing substrate for wide-area peer-to-peer
        applications. Pastry performs application-level routing and object location in a
        potentially very large overlay network of nodes connected via the Internet. It
        can be used to support a variety of peer-to-peer applications, including global
        data storage, data sharing, group communication and naming. Each node in the
        Pastry network has a unique identifier (nodeId). When presented with a message
        and a key, a Pastry node efficiently routes the message to the node with a nodeId
        that is numerically closest to the key, among all currently live Pastry nodes.
        Each Pastry node keeps track of its immediate neighbors in the nodeId space, and
        notifies applications of new node arrivals, node failures and recoveries. Pastry
        takes into account network locality; it seeks to minimize the distance messages
        travel, according to a scalar proximity metric like the number of IP routing
        hops. Pastry is completely decentralized, scalable, and self-organizing; it
        automatically adapts to the arrival, departure and failure of nodes. Experimental
        results obtained with a prototype implementation on an emulated network of up to
        100,000 nodes confirm Pastry's scalability and efficiency, its ability to
        self-organize and adapt to node failures, and its good network locality
        properties. Work done in part while visiting Microsoft Research, Cambridge, UK}, 
  www_section = {distributed hash table, Pastry}, 
  isbn = {3-540-42800-3}, 
  doi = {10.1007/3-540-45518-3_18}, 
  url = {http://www.cs.rice.edu/~druschel/publications/Pastry.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Pastry.pdf}, 
}
Rubenstein:2000:DSC:345063.339410
@article{Rubenstein:2000:DSC:345063.339410,
  title = {Detecting shared congestion of flows via end-to-end measurement}, 
  author = {Rubenstein, Dan and Kurose, Jim and Towsley, Don}, 
  journal = {IEEE/ACM Transactions on Networking}, 
  volume = {10}, 
  year = {2002}, 
  month = {June}, 
  address = {New York, NY, USA}, 
  pages = {381--395}, 
  publisher = {ACM}, 
  abstract = {Current Internet congestion control protocols operate independently on a
        per-flow basis. Recent work has demonstrated that cooperative congestion control
        strategies between flows can improve performance for a variety of applications,
        ranging from aggregated TCP transmissions to multiple-sender multicast
        applications. However, in order for this cooperation to be effective, one must
        first identify the flows that are congested at the same set of resources. We
        present techniques based on loss or delay observations at end hosts to infer
        whether or not two flows experiencing congestion are congested at the same
        network resources. Our novel result is that such detection can be achieved for
        unicast flows, but the techniques can also be applied to multicast flows. We
        validate these techniques via queueing analysis, simulation and experimentation
        within the Internet. In addition, we demonstrate preliminary simulation results
        that show that the delay-based technique can determine whether two TCP flows are
        congested at the same set of resources. We also propose metrics that can be used
        as a measure of the amount of congestion sharing between two flows}, 
  www_section = {end-to-end measurement, flow, internet congestion protocols, per-flow,
        shared congestion}, 
  issn = {1063-6692}, 
  doi = {10.1109/TNET.2002.1012369}, 
  url = {http://dx.doi.org/10.1109/TNET.2002.1012369}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%E2\%81\%84ACM\%20Transactions\%20on\%20Networking\%20-\%20Detecting\%20shared\%20congestion\%20of\%20flows.pdf},
}
SK
@conference{SK,
  title = {Receipt-Free MIX-Type Voting Scheme--A Practical Solution to the Implementation
        of a Voting Booth}, 
  author = {Sako, Kazue and Kilian, Joe}, 
  booktitle = {Proceedings of EUROCRYPT 1995}, 
  organization = {Springer-Verlag}, 
  year = {1995}, 
  publisher = {Springer-Verlag}, 
  abstract = {We present a receipt-free voting scheme based on a mix- type anonymous
        channel [Cha81, PIK93]. The receipt-freeness property [BT94] enables voters to
        hide how they have voted even from a powerful adversary who is trying to coerce
        him. The work of [BT94] gave the first solution using a voting booth, which is a
        hardware assumption not unlike that in current physical elections. In our
        proposed scheme, we reduce the physical assumptions required to obtain
        receipt-freeness. Our sole physical assumption is the existence of a private
        channel through which the center can send the voter a message without fear of
        eavesdropping}, 
  isbn = {978-3-540-59409-3}, 
  doi = {10.1007/3-540-49264-X}, 
  url = {http://www.springerlink.com/content/jhf7ccxn2fj2gfum/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SK.pdf}, 
  www_section = {Unsorted}, 
}
SN03
@conference{SN03,
  title = {On the Anonymity of Timed Pool Mixes}, 
  author = {Serjantov, Andrei and Newman, Richard E.}, 
  booktitle = {Proceedings of the Workshop on Privacy and Anonymity Issues in Networked and
        Distributed Systems}, 
  organization = {Kluwer}, 
  year = {2003}, 
  month = {May}, 
  address = {Athens, Greece}, 
  pages = {427--434}, 
  publisher = {Kluwer}, 
  abstract = {This paper presents a method for calculating the anonymity of a timed pool
        mix. Thus we are able to compare it to a threshold pool mix, and any future mixes
        that might be developed. Although we are only able to compute the anonymity of a
        timed pool mix after some specific number of rounds, this is a practical
        approximation to the real anonymity}, 
  www_section = {anonymity, mix}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.9.5699}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.9.5699.pdf}, 
}
SS03
@conference{SS03,
  title = {Passive Attack Analysis for Connection-Based Anonymity Systems}, 
  author = {Serjantov, Andrei and Sewell, Peter}, 
  booktitle = {Proceedings of ESORICS 2003}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2003}, 
  month = {October}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {In this paper we consider low latency connection-based anonymity systems
        which can be used for applications like web browsing or SSH. Although several
        such systems have been designed and built, their anonymity has so far not been
        adequately evaluated. We analyse the anonymity of connection-based systems
        against passive adversaries. We give a precise description of two attacks,
        evaluate their effectiveness, and calculate the amount of traffic necessary to
        provide a minimum degree of protection against them}, 
  www_section = {anonymity}, 
  isbn = {978-3-540-20300-1}, 
  doi = {10.1007/b13237}, 
  url = {http://www.springerlink.com/content/8jva7vy8tkert9ur/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.2005.pdf}, 
}
Saia02dynamicallyfault-tolerant
@booklet{Saia02dynamicallyfault-tolerant,
  title = {Dynamically Fault-Tolerant Content Addressable Networks}, 
  author = {Saia, Jared and Fiat, Amos and Gribble, Steven D. and Karlin, Anna R. and
        Saroiu, Stefan}, 
  year = {2002}, 
  abstract = {We describe a content addressable network which is robust in the face of
        massive adversarial attacks and in a highly dynamic environment. Our network is
        robust in the sense that at any time, an arbitrarily large fraction of the peers
        can reach an arbitrarily large fraction of the data items. The network can be
        created and maintained in a completely distributed fashion}, 
  www_section = {fault-tolerance, robustness}, 
  doi = {10.1007/3-540-45748-8}, 
  url = {http://www.springerlink.com/content/r7fumjuwmgnd4md1/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/180.pdf}, 
}
Saito:2004:MTP:968884.969522
@phdthesis{Saito:2004:MTP:968884.969522,
  title = {i-WAT: The Internet WAT System--An Architecture for Maintaining Trust and
        Facilitating Peer-to-Peer Barter Relationships}, 
  author = {Saito, Kenji}, 
  school = {Keio University}, 
  volume = {Philosophy (Media and Governance)}, 
  year = {2006}, 
  month = {January}, 
  address = {Washington, DC, USA}, 
  pages = {0--231}, 
  keywords = {i-WAT, OpenPGP, WAT system}, 
  url = {http://www.sfc.wide.ad.jp/dissertation/ks91_e.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Saito\%20-\%20i-WAT\%20Dissertation.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Saito:2006:FTI:1130897.1131000
@conference{Saito:2006:FTI:1130897.1131000,
  title = {Fair Trading of Information: A Proposal for the Economics of Peer-to-Peer
        Systems}, 
  author = {Saito, Kenji and Morino, Eiichi and Murai, Jun}, 
  booktitle = {ARES'06. Proceedings of the First International Conference on Availability,
        Reliability and Security}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  month = {April}, 
  address = {Vienna, Austria}, 
  pages = {764--771}, 
  publisher = {IEEE Computer Society}, 
  internal-note = {NOTE(review): duplicate of entry Saito:2006:FTI:1130897.1131000_0
        (same work); consider merging the two keys}, 
  abstract = {A P2P currency can be a powerful tool for promoting exchanges in a trusted
        way that make use of under-utilized resources both in computer networks and in
        real life. There are three classes of resource that can be exchanged in a P2P
        system: atoms (ex. physical goods by way of auctions), bits (ex. data files) and
        presences (ex. time slots for computing resources such as CPU, storage or
        bandwidth). If these are equally treated as commodities, however, the economy of
        the system is likely to collapse, because data files can be reproduced at a
        negligibly small cost whereas time slots for computing resources cannot even be
        stockpiled for future use. This paper clarifies this point by simulating a small
        world of traders, and proposes a novel way for applying the "reduction over time"
        feature[14] of i-WAT[11], a P2P currency. In the proposed new economic order
        (NEO), bits are freely shared among participants, whereas their producers are
        supported by peers, being given freedom to issue exchange tickets whose values
        are reduced over time}, 
  isbn = {0-7695-2567-9}, 
  doi = {10.1109/ARES.2006.62}, 
  url = {http://dl.acm.org/citation.cfm?id=1130897.1131000}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ARES\%2706\%20-\%20Fair\%20Trading\%20of\%20Information.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
Saito:2006:FTI:1130897.1131000_0
@conference{Saito:2006:FTI:1130897.1131000_0,
  title = {Fair Trading of Information: A Proposal for the Economics of Peer-to-Peer
        Systems}, 
  author = {Saito, Kenji and Morino, Eiichi and Murai, Jun}, 
  booktitle = {ARES'06. Proceedings of the First International Conference on Availability,
        Reliability and Security}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  month = {April}, 
  address = {Vienna, Austria}, 
  pages = {764--771}, 
  publisher = {IEEE Computer Society}, 
  internal-note = {NOTE(review): duplicate of entry Saito:2006:FTI:1130897.1131000
        (same work); this copy carries the www_section -- prefer it when merging}, 
  abstract = {A P2P currency can be a powerful tool for promoting exchanges in a trusted
        way that make use of under-utilized resources both in computer networks and in
        real life. There are three classes of resource that can be exchanged in a P2P
        system: atoms (ex. physical goods by way of auctions), bits (ex. data files) and
        presences (ex. time slots for computing resources such as CPU, storage or
        bandwidth). If these are equally treated as commodities, however, the economy of
        the system is likely to collapse, because data files can be reproduced at a
        negligibly small cost whereas time slots for computing resources cannot even be
        stockpiled for future use. This paper clarifies this point by simulating a small
        world of traders, and proposes a novel way for applying the "reduction over time"
        feature[14] of i-WAT[11], a P2P currency. In the proposed new economic order
        (NEO), bits are freely shared among participants, whereas their producers are
        supported by peers, being given freedom to issue exchange tickets whose values
        are reduced over time}, 
  www_section = {economics, information trading}, 
  isbn = {0-7695-2567-9}, 
  doi = {10.1109/ARES.2006.62}, 
  url = {http://dl.acm.org/citation.cfm?id=1130897.1131000}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ARES\%2706\%20-\%20Fair\%20Trading\%20of\%20Information.pdf},
}
Salsa
@conference{Salsa,
  title = {Salsa: A Structured Approach to Large-Scale Anonymity}, 
  author = {Nambiar, Arjun and Wright, Matthew}, 
  booktitle = {Proceedings of CCS 2006}, 
  organization = {ACM New York, NY, USA}, 
  year = {2006}, 
  month = {October}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Highly distributed anonymous communications systems have the promise to
        reduce the effectiveness of certain attacks and improve scalability over more
        centralized approaches. Existing approaches, however, face security and
        scalability issues. Requiring nodes to have full knowledge of the other nodes in
        the system, as in Tor and Tarzan, limits scalability and can lead to intersection
        attacks in peer-to-peer configurations. MorphMix avoids this requirement for
        complete system knowledge, but users must rely on untrusted peers to select the
        path. This can lead to the attacker controlling the entire path more often than
        is acceptable.To overcome these problems, we propose Salsa, a structured approach
        to organizing highly distributed anonymous communications systems for scalability
        and security. Salsa is designed to select nodes to be used in anonymous circuits
        randomly from the full set of nodes, even though each node has knowledge of only
        a subset of the network. It uses a distributed hash table based on hashes of the
        nodes' IP addresses to organize the system. With a virtual tree structure,
        limited knowledge of other nodes is enough to route node lookups throughout the
        system. We use redundancy and bounds checking when performing lookups to prevent
        malicious nodes from returning false information without detection. We show that
        our scheme prevents attackers from biasing path selection, while incurring
        moderate overheads, as long as the fraction of malicious nodes is less than 20\%.
        Additionally, the system prevents attackers from obtaining a snapshot of the
        entire system until the number of attackers grows too large (e.g. 15\% for 10000
        peers and 256 groups). The number of groups can be used as a tunable parameter in
        the system, depending on the number of peers, that can be used to balance
        performance and security}, 
  www_section = {P2P, privacy}, 
  isbn = {1-59593-518-5}, 
  doi = {10.1145/1180405.1180409}, 
  url = {http://portal.acm.org/citation.cfm?id=1180409}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Salsa.pdf}, 
}
Sandberg05searchingin
@booklet{Sandberg05searchingin,
  author = {Sandberg, Oskar}, 
  title = {Searching in a Small World}, 
  year = {2005}, 
  abstract = {The small-world phenomenon, that the world's social network is tightly
        connected, and that any two people can be linked by a short chain of friends, has
        long been a subject of interest. Famously, the psychologist Stanley Milgram
        performed an experiment where he asked people to deliver a letter to a stranger
        by forwarding it to an acquaintance, who could forward it to one his
        acquaintances, and so on until the destination was reached. The results seemed to
        confirm that the small-world phenomenon is real. Recently it has been shown by
        Jon Kleinberg that in order to search in a network, that is to actually find the
        short paths in the manner of the Milgram experiment, a very special type of a
        graph model is needed. In this thesis, we present two ideas about searching in
        the small world stemming from Kleinberg's results. In the first we study the
        formation of networks of this type, attempting to see why the kind}, 
  www_section = {small-world}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.101.688}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.101.688.pdf}, 
}
Sanghavi:2005:NMF:1080192.1080200
@inproceedings{Sanghavi:2005:NMF:1080192.1080200,
  title = {A new mechanism for the free-rider problem}, 
  author = {Sanghavi, Sujay and Hajek, Bruce}, 
  booktitle = {P2PEcon'05. Proceedings of the 2005 ACM SIGCOMM Workshop on Economics of
        Peer-to-Peer Systems}, 
  organization = {ACM}, 
  year = {2005}, 
  month = aug, 
  address = {Philadelphia, Pennsylvania, USA}, 
  pages = {122--127}, 
  publisher = {ACM}, 
  series = {P2PECON '05}, 
  abstract = {The free-rider problem arises in the provisioning of public resources, when
        users of the resource have to contribute towards the cost of production. Selfish
        users may have a tendency to misrepresent preferences -- so as to minimize
        individual contributions -- leading to inefficient levels of production of the
        resource. Groves and Loeb formulated a classic model capturing this problem, and
        proposed (what later came to be known as) the VCG mechanism as a solution.
        However, in the presence of heterogeneous users and communication constraints, or
        in decentralized settings, implementing this mechanism places an unrealistic
        communication burden. In this paper we propose a class of alternative mechanisms
        for the same problem as considered by Groves and Loeb, but with the added
        constraint of severely limited communication between users and the provisioning
        authority. When these mechanisms are used, efficient production is ensured as a
        Nash equilibrium outcome, for a broad class of users. Furthermore, a natural bid
        update strategy is shown to globally converge to efficient Nash equilibria. An
        extension to multiple public goods with inter-related valuations is also
        presented}, 
  www_section = {free-rider, problem}, 
  isbn = {1-59593-026-4}, 
  doi = {10.1145/1080192.1080200}, 
  url = {http://doi.acm.org/10.1145/1080192.1080200}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PEcon\%2705\%20-\%20A\%20new\%20mechanism\%20for\%20the\%20free-rider\%20problem.pdf},
}
Saroiu02ameasurement
@inproceedings{Saroiu02ameasurement,
  title = {A Measurement Study of Peer-to-Peer File Sharing Systems}, 
  author = {Stefan Saroiu and P. Krishna Gummadi and Steven D. Gribble}, 
  booktitle = {Multimedia Computing and Networking (MMCN)}, 
  year = {2002}, 
  month = jan, 
  address = {San Jose}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.61.4223\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mmcn.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
Scarlata01responderanonymity
@inproceedings{Scarlata01responderanonymity,
  title = {Responder Anonymity and Anonymous Peer-to-Peer File Sharing}, 
  author = {Vincent Scarlata and Brian Neil Levine and Clay Shields}, 
  booktitle = {Network Protocols, 2001. Ninth International Conference on}, 
  year = {2001}, 
  month = nov, 
  abstract = {Data transfer over TCP/IP provides no privacy for network users. Previous
        research in anonymity has focused on the provision of initiator anonymity. We
        explore methods of adapting existing initiator-anonymous protocols to provide
        responder anonymity and mutual anonymity. We present Anonymous Peer-to-peer File
        Sharing (APFS) protocols, which provide mutual anonymity for peer-to-peer file
        sharing. APFS addresses the problem of long-lived Internet services that may
        outlive the degradation present in current anonymous protocols. One variant of
        APFS makes use of unicast communication, but requires a central coordinator to
        bootstrap the protocol. A second variant takes advantage of multicast routing to
        remove the need for any central coordination point. We compare the TCP
        performance of APFS protocol to existing overt file sharing systems such as
        Napster. In providing anonymity, APFS can double transfer times and requires
        that additional traffic be carried by peers, but this overhead is constant with
        the size of the session}, 
  www_section = {anonymity, APFS, multicast}, 
  isbn = {0-7695-1429-4}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.20.7821\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/APFS.pdf}, 
}
Serj02
@inproceedings{Serj02,
  title = {Towards an Information Theoretic Metric for Anonymity}, 
  author = {Andrei Serjantov and George Danezis}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies Workshop (PET 2002)}, 
  organization = {Springer-Verlag, LNCS 2482}, 
  year = {2002}, 
  month = apr, 
  editor = {Roger Dingledine and Paul Syverson}, 
  publisher = {Springer-Verlag, LNCS 2482}, 
  abstract = {In this paper we look closely at the popular metric of anonymity, the
        anonymity set, and point out a number of problems associated with it. We then
        propose an alternative information theoretic measure of anonymity which takes
        into account the probabilities of users sending and receiving the messages and
        show how to calculate it for a message in a standard mix-based anonymity system.
        We also use our metric to compare a pool mix to a traditional threshold mix,
        which was impossible using anonymity sets. We also show how the maximum route
        length restriction which exists in some fielded anonymity systems can lead to the
        attacker performing more powerful traffic analysis. Finally, we discuss open
        problems and future work on anonymity measurements}, 
  www_section = {anonymity measurement, traffic analysis}, 
  isbn = {978-3-540-00565-0}, 
  doi = {10.1007/3-540-36467-6}, 
  url = {http://www.springerlink.com/content/wwe2c7g3hmwn0klf/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.12.5992.pdf}, 
}
Serj02-iptps
@inproceedings{Serj02-iptps,
  title = {Anonymizing censorship resistant systems}, 
  author = {Andrei Serjantov}, 
  booktitle = {Proceedings of the 1st International Peer To Peer Systems Workshop (IPTPS
        2002)}, 
  organization = {Springer-Verlag London, UK}, 
  year = {2002}, 
  month = mar, 
  publisher = {Springer-Verlag London, UK}, 
  abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship
        resistant system with user, server and active-server document anonymity as well
        as efficient document retrieval. The retrieval service is layered on top of an
        existing Peer-to-Peer infrastructure, which should facilitate its implementation.
        The key idea is to separate the role of document storers from the machines
        visible to the users, which makes each individual part of the system less prone
        to attacks, and therefore to censorship. Indeed, if one server has been pressured
        into removal, the other server administrators may simply follow the precedent and
        remove the offending content themselves}, 
  www_section = {anonymity, censorship resistance, P2P}, 
  isbn = {978-3-540-44179-3}, 
  doi = {10.1007/3-540-45748-8}, 
  url = {http://portal.acm.org/citation.cfm?id=687808}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Serj02-iptps.pdf}, 
}
Serjantov02anonymizingcensorship
@booklet{Serjantov02anonymizingcensorship,
  title = {Anonymizing Censorship Resistant Systems}, 
  author = {Andrei Serjantov}, 
  volume = {2429}, 
  year = {2002}, 
  pages = {111--120}, 
  publisher = {Springer-Verlag London, UK}, 
  abstract = {In this paper we propose a new Peer-to-Peer architecture for a censorship
        resistant system with user, server and active-server document anonymity as well
        as efficient document retrieval. The retrieval service is layered on top of an
        existing Peer-to-Peer infrastructure, which should facilitate its
        implementation}, 
  isbn = {3-540-44179-4}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.13.5048\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.13.5048.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
Serjantov02towardsan
@booklet{Serjantov02towardsan,
  title = {Towards an Information Theoretic Metric for Anonymity}, 
  author = {Andrei Serjantov and George Danezis}, 
  journal = {Lecture Notes in Computer Science}, 
  volume = {2482}, 
  year = {2002}, 
  pages = {41--53}, 
  publisher = {Springer-Verlag}, 
  abstract = {In this paper we look closely at the popular metric of anonymity, the
        anonymity set, and point out a number of problems associated with it. We then
        propose an alternative information theoretic measure of anonymity which takes
        into account the probabilities of users sending and receiving the messages and
        show how to calculate it for a message in a standard mix-based anonymity system.
        We also use our metric to compare a pool mix to a traditional threshold mix,
        which was impossible using anonymity sets. We also show how the maximum route
        length restriction which exists in some fielded anonymity systems can lead to the
        attacker performing more powerful traffic analysis. Finally, we discuss open
        problems and future work on anonymity measurements}, 
  isbn = {978-3-540-00565-0}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.12.5992\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/set.dvi_.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
Serjantov03puzzlesin
@inproceedings{Serjantov03puzzlesin,
  title = {Puzzles in P2P Systems}, 
  author = {Andrei Serjantov and Stephen Lewis}, 
  booktitle = {8th CaberNet Radicals Workshop}, 
  organization = {Network of Excellence in Distributed and Dependable Computing Systems}, 
  year = {2003}, 
  month = oct, 
  address = {Ajaccio, Corsica}, 
  publisher = {Network of Excellence in Distributed and Dependable Computing Systems}, 
  abstract = {In this paper we consider using client puzzles to provide incentives for
        users in a peer-to-peer system to behave in a uniform way. The techniques
        developed can be used to encourage users of a system to share content (combating
        the free riding problem) or perform {\textquoteleft}community' tasks}, 
  www_section = {p2p network, puzzle}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CaberNet\%20Radicals\%20Workshop\%20-\%20Puzzles\%20in\%20P2P\%20Systems.pdf},
}
ShWa-Relationship
@inproceedings{ShWa-Relationship,
  title = {Measuring Relationship Anonymity in Mix Networks}, 
  author = {Vitaly Shmatikov and Ming-Hsui Wang}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2006)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2006}, 
  month = oct, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Many applications of mix networks such as anonymous Web browsing require
        relationship anonymity: it should be hard for the attacker to determine who is
        communicating with whom. Conventional methods for measuring anonymity, however,
        focus on sender anonymity instead. Sender anonymity guarantees that it is
        difficult for the attacker to determine the origin of any given message exiting
        the mix network, but this may not be sufficient to ensure relationship anonymity.
        Even if the attacker cannot identify the origin of messages arriving to some
        destination, relationship anonymity will fail if he can determine with high
        probability that at least one of the messages originated from a particular
        sender, without necessarily being able to recognize this message among others. We
        give a formal definition and a calculation methodology for relationship
        anonymity. Our techniques are similar to those used for sender anonymity, but,
        unlike sender anonymity, relationship anonymity is sensitive to the distribution
        of message destinations. In particular, Zipfian distributions with skew values
        characteristic of Web browsing provide especially poor relationship anonymity.
        Our methodology takes route selection algorithms into account, and incorporates
        information-theoretic metrics such as entropy and min-entropy. We illustrate our
        methodology by calculating relationship anonymity in several simulated mix
        networks}, 
  www_section = {anonymity, privacy}, 
  isbn = {1-59593-556-8}, 
  doi = {10.1145/1179601.1179611}, 
  url = {http://portal.acm.org/citation.cfm?id=1179611}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ShWa-Relationship.pdf}, 
}
ShWa-Timing06
@inproceedings{ShWa-Timing06,
  title = {Timing Analysis in Low-Latency Mix Networks: Attacks and Defenses}, 
  author = {Vitaly Shmatikov and Ming-Hsui Wang}, 
  booktitle = {Proceedings of ESORICS 2006}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2006}, 
  month = sep, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Mix networks are a popular mechanism for anonymous Internet communications.
        By routing IP traffic through an overlay chain of mixes, they aim to hide the
        relationship between its origin and destination. Using a realistic model of
        interactive Internet traffic, we study the problem of defending low-latency mix
        networks against attacks based on correlating inter-packet intervals on two or
        more links of the mix chain. We investigate several attack models, including an
        active attack which involves adversarial modification of packet flows in order to
        {\textquotedblleft}fingerprint{\textquotedblright} them, and analyze the
        tradeoffs between the amount of cover traffic, extra latency, and anonymity
        properties of the mix network. We demonstrate that previously proposed defenses
        are either ineffective, or impose a prohibitively large latency and/or bandwidth
        overhead on communicating applications. We propose a new defense based on
        adaptive padding}, 
  www_section = {anonymity}, 
  isbn = {978-3-540-44601-9}, 
  doi = {10.1007/11863908}, 
  url = {http://www.springerlink.com/content/3n136578m4211484/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ShWa-Timing06.pdf}, 
}
Shafaat:2008:PAN:1485753.1485763
@inproceedings{Shafaat:2008:PAN:1485753.1485763,
  title = {A Practical Approach to Network Size Estimation for Structured Overlays}, 
  author = {Shafaat, Tallat M. and Ali Ghodsi and Seif Haridi}, 
  booktitle = {IWSOS'08--Proceedings of the 3rd International Workshop on Self-Organizing
        Systems}, 
  organization = {Springer-Verlag}, 
  volume = {5343}, 
  year = {2008}, 
  month = dec, 
  address = {Vienna, Austria}, 
  pages = {71--83}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Structured overlay networks have recently received much attention due to
        their self-* properties under dynamic and decentralized settings. The number of
        nodes in an overlay fluctuates all the time due to churn. Since knowledge of the
        size of the overlay is a core requirement for many systems, estimating the size
        in a decentralized manner is a challenge taken up by recent research activities.
        Gossip-based Aggregation has been shown to give accurate estimates for the
        network size, but previous work done is highly sensitive to node failures. In
        this paper, we present a gossip-based aggregation-style network size estimation
        algorithm. We discuss shortcomings of existing aggregation-based size estimation
        algorithms, and give a solution that is highly robust to node failures and is
        adaptive to network delays. We examine our solution in various scenarios to
        demonstrate its effectiveness}, 
  www_section = {network size estimation, structured overlays}, 
  isbn = {978-3-540-92156-1}, 
  doi = {10.1007/978-3-540-92157-8_7}, 
  url = {http://dx.doi.org/10.1007/978-3-540-92157-8_7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IWSOS\%2708\%20-\%20Network\%20Size\%20Estimation\%20for\%20Structured\%20Overlays.pdf},
}
Sherr07towardsapplication-aware
@inproceedings{Sherr07towardsapplication-aware,
  title = {Towards application-aware anonymous routing}, 
  author = {Micah Sherr and Boon Thau Loo and Matt Blaze}, 
  booktitle = {Second USENIX Workshop on Hot Topics in Security (HotSec)}, 
  organization = {USENIX Association Berkeley, CA, USA}, 
  year = {2007}, 
  publisher = {USENIX Association Berkeley, CA, USA}, 
  abstract = {This paper investigates the problem of designing anonymity networks that meet
        application-specific performance and security constraints. We argue that existing
        anonymity networks take a narrow view of performance by considering only the
        strength of the offered anonymity. However, real-world applications impose a
        myriad of communication requirements, including end-to-end bandwidth and latency,
        trustworthiness of intermediary routers, and network jitter. We pose a grand
        challenge for anonymity: the development of a network architecture that enables
        applications to customize routes that tradeoff between anonymity and performance.
        Towards this challenge, we present the Application-Aware Anonymity (A3) routing
        service. We envision that A3 will serve as a powerful and flexible anonymous
        communications layer that will spur the future development of anonymity
        services}, 
  www_section = {anonymity, routing}, 
  url = {http://portal.acm.org/citation.cfm?id=1361423}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/a3.pdf}, 
}
Sherwood_p5:a
@article{Sherwood_p5:a,
  title = {P5: A Protocol for Scalable Anonymous Communication}, 
  author = {Rob Sherwood and Bobby Bhattacharjee and Aravind Srinivasan}, 
  journal = {Journal of Computer Security}, 
  volume = {13}, 
  year = {2002}, 
  month = dec, 
  pages = {839--876}, 
  publisher = {IOS Press Amsterdam, The Netherlands}, 
  abstract = {We present a protocol for anonymous communication over the Internet. Our
        protocol, called P5 (Peer-to-Peer Personal Privacy Protocol) provides sender-,
        receiver-, and sender-receiver anonymity. P5 is designed to be implemented over
        the current Internet protocols, and does not require any special infrastructure
        support. A novel feature of P5 is that it allows individual participants to
        trade-off degree of anonymity for communication efficiency, and hence can be used
        to scalably implement large anonymous groups. We present a description of P5, an
        analysis of its anonymity and communication efficiency, and evaluate its
        performance using detailed packet-level simulations}, 
  url = {http://www.cs.umd.edu/projects/p5/p5.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p5.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
Shields00aprotocol
@inproceedings{Shields00aprotocol,
  title = {A Protocol for Anonymous Communication Over the Internet}, 
  author = {Clay Shields and Brian Neil Levine}, 
  booktitle = {ACM Conference on Computer and Communications Security}, 
  organization = {ACM Press}, 
  year = {2000}, 
  pages = {33--42}, 
  publisher = {ACM Press}, 
  abstract = {This paper presents a new protocol for initiator anonymity called Hordes,
        which uses forwarding mechanisms similar to those used in previous protocols for
        sending data, but is the first protocol to make use of the anonymity inherent in
        multicast routing to receive data. We show this results in shorter transmission
        latencies and requires less work of the protocol participants, in terms of the
        messages processed. We also present a comparison of the security and anonymity of
        Hordes with previous protocols, using the first quantitative definition of
        anonymity and unlinkability. Our analysis shows that Hordes provides anonymity in
        a degree similar to that of Crowds and Onion Routing, but also that Hordes has
        numerous performance advantages}, 
  www_section = {Hordes, multicast}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.37.3890\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hordes-final-all.dvi_.pdf},
}
Shnayder04simulatingthe
@inproceedings{Shnayder04simulatingthe,
  title = {Simulating the power consumption of large-scale sensor network applications}, 
  author = {Victor Shnayder and Mark Hempstead and Bor-rong Chen and Geoff Werner Allen and
        Matt Welsh}, 
  booktitle = {SenSys}, 
  organization = {ACM Press}, 
  year = {2004}, 
  pages = {188--200}, 
  publisher = {ACM Press}, 
  abstract = {Developing sensor network applications demands a new set of tools to aid
        programmers. A number of simulation environments have been developed that provide
        varying degrees of scalability, realism, and detail for understanding the
        behavior of sensor networks. To date, however, none of these tools have addressed
        one of the most important aspects of sensor application design: that of power
        consumption. While simple approximations of overall power usage can be derived
        from estimates of node duty cycle and communication rates, these techniques often
        fail to capture the detailed, low-level energy requirements of the CPU, radio,
        sensors, and other peripherals. In this paper, we present PowerTOSSIM, a scalable
        simulation environment for wireless sensor networks that provides an accurate,
        per-node estimate of power consumption. PowerTOSSIM is an extension to TOSSIM, an
        event-driven simulation environment for TinyOS applications. In PowerTOSSIM,
        TinyOS components corresponding to specific hardware peripherals (such as the
        radio, EEPROM, LEDs, and so forth) are instrumented to obtain a trace of each
        device's activity during the simulation run. PowerTOSSIM employs a novel
        code-transformation technique to estimate the number of CPU cycles executed by
        each node, eliminating the need for expensive instruction-level simulation of
        sensor nodes. PowerTOSSIM includes a detailed model of hardware energy
        consumption based on the Mica2 sensor node platform. Through instrumentation of
        actual sensor nodes, we demonstrate that PowerTOSSIM provides accurate estimation
        of power consumption for a range of applications and scales to support very large
        simulations}, 
  www_section = {sensor networks, TinyOS}, 
  doi = {10.1145/1031495.1031518}, 
  url = {http://portal.acm.org/citation.cfm?id=1031495.1031518}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.65.9976.pdf}, 
}
Sirer_cliquenet:a
@techreport{Sirer_cliquenet:a,
  title = {CliqueNet: A Self-Organizing, Scalable, Peer-to-Peer Anonymous Communication
        Substrate}, 
  author = {Emin G{\"u}n Sirer and Milo Polte and Mark Robson}, 
  institution = {Cornell}, 
  number = {TR2001}, 
  year = {2001}, 
  address = {Ithaca}, 
  abstract = {Anonymity is critical for many networked applications. Yet current Internet
        protocols provide no support for masking the identity of communication endpoints.
        This paper outlines a design for a peer-to-peer, scalable, tamper-resilient
        communication protocol that provides strong anonymity and privacy. Called
        CliqueNet, our protocol provides an information-theoretic guarantee: an
        omnipotent adversary that can wiretap at any location in the network cannot
        determine the sender of a packet beyond a clique, that is, a set of k hosts,
        where k is an anonymizing factor chosen by the participants. CliqueNet is
        resilient to jamming by malicious hosts and can scale with the number of
        participants. This paper motivates the need for an anonymous communication layer
        and describes the self-organizing, novel divide-and-conquer approach that enables
        CliqueNet to scale while offering a strong anonymity guarantee. CliqueNet is
        widely applicable as a communication substrate for peer-to-peer applications that
        require anonymity, privacy and anti-censorship guarantees}, 
  www_section = {anonymity, CliqueNet, DC-network}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.22.4785\&rep=rep1\&type=url\&i=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cliquenet-iptp.pdf}, 
}
Song00practicaltechniques
@inproceedings{Song00practicaltechniques,
  title = {Practical Techniques for Searches on Encrypted Data}, 
  author = {Dawn Xiaodong Song and David Wagner and Adrian Perrig}, 
  booktitle = {Security and Privacy, 2000. S\&P 2000. Proceedings. 2000 IEEE Symposium
        on}, 
  year = {2000}, 
  month = jan, 
  address = {Berkeley, CA, USA}, 
  abstract = {It is desirable to store data on data storage servers such as mail servers
        and file servers in encrypted form to reduce security and privacy risks. But this
        usually implies that one has to sacrifice functionality for security. For
        example, if a client wishes to retrieve only documents containing certain words,
        it was not previously known how to let the data storage server perform the search
        and answer the query without loss of data confidentiality}, 
  isbn = {0-7695-0665-8}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/encrypteddata.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
Stathopoulos07end-to-endrouting
@inproceedings{Stathopoulos07end-to-endrouting,
  title = {End-to-end routing for dual-radio sensor networks}, 
  author = {Thanos Stathopoulos and John Heidemann and Martin Lukac and Deborah Estrin and
        William J. Kaiser}, 
  booktitle = {INFOCOM}, 
  year = {2007}, 
  pages = {2252--2260}, 
  abstract = {Dual-radio, dual-processor nodes are an emerging class of Wireless Sensor
        Network devices that provide both low-energy operation as well as substantially
        increased computational performance and communication bandwidth for applications.
        In such systems, the secondary radio and processor operates with sufficiently low
        power that it may remain always vigilant, while the main processor and
        primary, high-bandwidth radio remain off until triggered by the application. By
        exploiting the high energy efficiency of the main processor and primary radio
        along with proper usage, net operating energy benefits are enabled for
        applications. The secondary radio provides a constantly available multi-hop
        network, while paths in the primary network exist only when required. This paper
        describes a topology control mechanism for establishing an end-to-end path in a
        network of dual-radio nodes using the secondary radios as a control channel to
        selectively wake up nodes along the required end-to-end path. Using numerical
        models as well as testbed experimentation, we show that our proposed mechanism
        provides significant energy savings of more than 60 \% compared to alternative
        approaches, and that it incurs only moderately greater application latency}, 
  www_section = {routing, wireless sensor network}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.87.8984}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Stathopoulos07a.pdf}, 
}
Stefansson06myriadstore
@booklet{Stefansson06myriadstore,
  title       = {MyriadStore: A Peer-to-Peer Backup System},
  author      = {Stefansson, Birgir and Thodis, Antonios and Ghodsi, Ali and Haridi, Seif},
  year        = {2006},
  abstract    = {Traditional backup methods are error prone, cumbersome and expensive.
        Distributed backup applications have emerged as promising tools able to avoid
        these disadvantages, by exploiting unused disk space of remote computers. In
        this paper we propose MyriadStore, a distributed peer-to-peer backup system.
        MyriadStore makes use of a trading scheme that ensures that a user has as
        much available storage space in the system as the one he/she contributes to
        it. A mechanism for making challenges between the system's nodes ensures that
        this restriction is fulfilled. Furthermore, MyriadStore minimizes bandwidth
        requirements and migration costs by treating separately the storage of the
        system's meta-data and the storage of the backed up data. This approach also
        offers great flexibility on the placement of the backed up data, a property
        that facilitates the deployment of the trading scheme},
  www_section = {backup, P2P},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.6985},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.126.5915.pdf},
}
Steiner:2007:GVK:1298306.1298323
@inproceedings{Steiner:2007:GVK:1298306.1298323,
  title = {A global view of KAD}, 
  author = {Steiner, Moritz and En-Najjary, Taoufik and E W Biersack}, 
  booktitle = {IMC'07--Proceedings of the 7th ACM SIGCOMM Conference on Internet
        Measurement}, 
  organization = {ACM}, 
  year = {2007}, 
  month = oct, 
  address = {San Diego, CA, USA}, 
  pages = {117--122}, 
  publisher = {ACM}, 
  series = {IMC '07}, 
  abstract = {Distributed hash tables (DHTs) have been actively studied in literature and
        many different proposals have been made on how to organize peers in a DHT.
        However, very few DHTs have been implemented in real systems and deployed on
        a large scale. One exception is KAD, a DHT based on Kademlia, which is
        part of eDonkey2000, a peer-to-peer file sharing system with several million
        simultaneous users. We have been crawling KAD continuously for about
        six months and obtained information about the total number of peers online and
        their geographical distribution. Peers are identified by the so called KAD ID,
        which was up to now assumed to remain the same across sessions. However, we
        observed that this is not the case: There is a large number of peers, in
        particular in China, that change their KAD ID, sometimes as frequently as after
        each session. This change of KAD IDs makes it difficult to characterize end-user
        availability or membership turnover}, 
  www_section = {distributed hash table, lookup, peer-to-peer networking}, 
  isbn = {978-1-59593-908-1}, 
  doi = {10.1145/1298306.1298323}, 
  url = {http://doi.acm.org/10.1145/1298306.1298323}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2707\%20-\%20A\%20global\%20view\%20of\%20KAD.pdf},
}
Stemm96reducingpower
@booklet{Stemm96reducingpower,
  title       = {Reducing Power Consumption of Network Interfaces in Hand-Held Devices
        (Extended Abstract)},
  author      = {Stemm, Mark and Gauthier, Paul and Harada, Daishi and Katz, Randy H.},
  year        = {1996},
  abstract    = {An important issue to be addressed for the next generation of
        wirelessly-connected hand-held devices is battery longevity. In this paper we
        examine this issue from the point of view of the Network Interface (NI). In
        particular, we measure the power usage of two PDAs, the Apple Newton
        Messagepad and Sony Magic Link, and four NIs, the Metricom Ricochet Wireless
        Modem, the AT\&T Wavelan operating at 915 MHz and 2.4 GHz, and the IBM
        Infrared Wireless LAN Adapter. These measurements clearly indicate that the
        power drained by the network interface constitutes a large fraction of the
        total power used by the PDA. We also conduct trace-driven simulation
        experiments and show that by using applicationspecific policies it is
        possible to},
  url         = {https://bibliography.gnunet.org},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.39.8384.pdf},
  www_section = {Unsorted},
}
Stoica01chord:a
@conference{Stoica01chord:a,
  title = {Chord: A Scalable Peer-to-Peer Lookup Service for Internet Applications}, 
  author = {Ion Stoica and Robert Morris and David Karger and Frans M. Kaashoek and Hari
        Balakrishnan}, 
  booktitle = {Proceedings of the 2001 conference on Applications, technologies,
        architectures, and protocols for computer communications}, 
  organization = {ACM New York, NY, USA}, 
  year = {2001}, 
  month = {January}, 
  address = {San Diego, California, United States}, 
  pages = {149--160}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Efficiently determining the node that stores a data item in a distributed
        network is an important and challenging problem. This paper describes the
        motivation and design of the Chord system, a decentralized lookup service that
        stores key/value pairs for such networks. The Chord protocol takes as input an
        m-bit identifier (derived by hashing a higher-level application specific key),
        and returns the node that stores the value corresponding to that key. Each Chord
        node is identified by an m-bit identifier and each node stores the key
        identifiers in the system closest to the node's identifier. Each node maintains
        an m-entry routing table that allows it to look up keys efficiently. Results from
        theoretical analysis, simulations, and experiments show that Chord is
        incrementally scalable, with insertion and lookup costs scaling logarithmically
        with the number of Chord nodes}, 
  www_section = {Chord, distributed hash table}, 
  isbn = {1-58113-411-8}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chord_sigcomm.pdf}, 
}
Stumm:1987:SDR:55482.55508
@conference{Stumm:1987:SDR:55482.55508,
  title = {Strategies for decentralized resource management}, 
  author = {Stumm, Michael}, 
  booktitle = {SIGCOMM'87. Proceedings of the ACM Workshop on Frontiers in Computer
        Communications Technology}, 
  organization = {ACM}, 
  year = {1987}, 
  month = {August}, 
  address = {Stowe, VT, USA}, 
  pages = {245--253}, 
  publisher = {ACM}, 
  series = {SIGCOMM '87}, 
  abstract = {Decentralized resource management in distributed systems has become more
        practical with the availability of communication facilities that support
        multicasting. In this paper we present several example solutions for managing
        resources in a decentralized fashion, using multicasting facilities. We review
        the properties of these solutions in terms of scalability, fault tolerance and
        efficiency. We conclude that decentralized solutions compare favorably to
        centralized solutions with respect to all three criteria}, 
  www_section = {decentralized, distributed systems, multicasting}, 
  isbn = {0-89791-245-4}, 
  doi = {10.1145/55482.55508}, 
  url = {http://doi.acm.org/10.1145/55482.55508}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2787\%20-\%20Strategies\%20for\%20decentralized\%20resource\%20management.pdf},
}
Stutzbach:2006:UCP:1177080.1177105
@conference{Stutzbach:2006:UCP:1177080.1177105,
  title = {Understanding churn in peer-to-peer networks}, 
  author = {Stutzbach, Daniel and Rejaie, Reza}, 
  booktitle = {IMC'06. Proceedings of the 6th ACM SIGCOMM Conference on Internet
        Measurement}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {October}, 
  address = {Rio de Janeriro, Brazil}, 
  pages = {189--202}, 
  publisher = {ACM}, 
  series = {IMC '06}, 
  abstract = {The dynamics of peer participation, or churn, are an inherent property of
        Peer-to-Peer (P2P) systems and critical for design and evaluation. Accurately
        characterizing churn requires precise and unbiased information about the arrival
        and departure of peers, which is challenging to acquire. Prior studies show that
        peer participation is highly dynamic but with conflicting characteristics.
        Therefore, churn remains poorly understood, despite its significance. In this
        paper, we identify several common pitfalls that lead to measurement error. We
        carefully address these difficulties and present a detailed study using three
        widely-deployed P2P systems: an unstructured file-sharing system (Gnutella), a
        content-distribution system (BitTorrent), and a Distributed Hash Table (Kad). Our
        analysis reveals several properties of churn: (i) overall dynamics are
        surprisingly similar across different systems, (ii) session lengths are not
        exponential, (iii) a large portion of active peers are highly stable while the
        remaining peers turn over quickly, and (iv) peer session lengths across
        consecutive appearances are correlated. In summary, this paper advances our
        understanding of churn by improving accuracy, comparing different P2P file
        sharing/distribution systems, and exploring new aspects of churn}, 
  www_section = {BitTorrent, churn, Gnutella, KAD, peer-to-peer networking, session length,
        uptime}, 
  isbn = {1-59593-561-4}, 
  doi = {10.1145/1177080.1177105}, 
  url = {http://doi.acm.org/10.1145/1177080.1177105}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IMC\%2706\%20-\%20Understanding\%20churn\%20in\%20p2p\%20networks.pdf},
}
SupritiSinghMasterThesis2014
@mastersthesis{SupritiSinghMasterThesis2014,
  title = {Experimental comparison of Byzantine fault tolerant distributed hash tables}, 
  author = {Supriti Singh}, 
  school = {Saarland University}, 
  volume = {M.S}, 
  year = {2014}, 
  month = {September}, 
  address = {Saarbruecken}, 
  pages = {0--42}, 
  type = {Masters}, 
  abstract = {Distributed Hash Tables (DHTs) are a key data structure for construction of
        peer to peer systems. They provide an efficient way to distribute the storage and
        retrieval of key-data pairs among the participating peers. DHTs should be
        scalable, robust against churn and resilient to attacks. X-Vine is a DHT protocol
        which offers security against Sybil attacks. All communication among peers is
        performed over social network links, with the presumption that a friend can be
        trusted. This trust can be extended to a friend of a friend. It uses the tested
        Chord Ring topology as an overlay, which has been proven to be scalable and
        robust. The aim of the thesis is to experimentally compare two DHTs, R5N and
        X-Vine. GNUnet is a free software secure peer to peer framework, which uses
        R5N. In this thesis, we have presented the implementation of X-Vine on GNUnet,
        and compared the performance of R5N and X-Vine}, 
  www_section = {DHT, GNUnet, performance analysis, testbed, X-vine}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SupritiSinghMasterThesis.pdf},
}
TH04
@conference{TH04,
  title = {Measuring Anonymity in a Non-adaptive, Real-time System}, 
  author = {Gergely T{\'o}th and Zolt{\'a}n Horn{\'a}k}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  volume = {3424}, 
  year = {2004}, 
  pages = {226--241}, 
  series = {Springer-Verlag, LNCS}, 
  abstract = {Anonymous message transmission should be a key feature in network
        architectures ensuring that delivered messages are impossible-or at least
        infeasible-to be traced back to their senders. For this purpose the formal model
        of the non-adaptive, real-time PROB-channel will be introduced. In this model
        attackers try to circumvent applied protection measures and to link senders to
        delivered messages. In order to formally measure the level of anonymity provided
        by the system, the probability will be given, with which observers can determine
        the senders of delivered messages (source-hiding property) or the recipients of
        sent messages (destination-hiding property). In order to reduce the certainty of
        an observer, possible counter-measures will be defined that will ensure specified
        upper limit for the probability with which an observer can mark someone as the
        sender or recipient of a message. Finally results of simulations will be shown to
        demonstrate the strength of the techniques}, 
  isbn = {3-540-26203-2}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.77.851}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TH04.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
THV04
@conference{THV04,
  title = {Measuring Anonymity Revisited}, 
  author = {Gergely T{\'o}th and Zolt{\'a}n Horn{\'a}k and Ferenc Vajda}, 
  booktitle = {Proceedings of the Ninth Nordic Workshop on Secure IT Systems}, 
  year = {2004}, 
  month = {November}, 
  address = {Espoo, Finland}, 
  pages = {85--90}, 
  editor = {Sanna Liimatainen and Teemupekka Virtanen}, 
  abstract = {Anonymous message transmission systems are the building blocks of several
        high-level anonymity services (e.g. e-payment, e-voting). Therefore, it is
        essential to give a theoretically based but also practically usable objective
        numerical measure for the provided level of anonymity. In this paper two
        entropy-based anonymity measures will be analyzed and some shortcomings of these
        methods will be highlighted. Finally, source- and destination-hiding properties
        will be introduced for so called local anonymity, an aspect reflecting the point
        of view of the users}, 
  www_section = {anonymity, anonymity measurement}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.7843}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/THV04.pdf}, 
}
Tamilmani04swift:a
@conference{Tamilmani04swift:a,
  title = {SWIFT: A System With Incentives For Trading}, 
  author = {Karthik Tamilmani and Vinay Pai and Alexander E. Mohr}, 
  booktitle = {P2PECON'04. Proceedings of the 2nd Workshop on Economics of Peer-to-Peer
        Systems}, 
  year = {2004}, 
  month = {June}, 
  address = {Cambridge, Massachusetts, USA}, 
  abstract = {In this paper, we present the design of a credit-based trading mechanism for
        peer-to-peer file sharing networks. We divide files into verifiable pieces; every
        peer interested in a file requests these pieces individually from the peers it is
        connected to. Our goal is to build a mechanism that supports fair large scale
        distribution in which downloads are fast, with low startup latency. We build a
        trading model in which peers use a pairwise currency to reconcile trading
        differences with each other and examine various trading strategies that peers can
        adopt. We show through analysis and simulation that peers who contribute to the
        network and take risks receive the most benefit in return. Our simulations
        demonstrate that peers who set high upload rates receive high download rates in
        return, but free-riders download very slowly compared to peers who upload.
        Finally, we propose a default trading strategy that is good for both the network
        as a whole and the peer employing it: deviating from that strategy yields little
        or no advantage for the peer}, 
  www_section = {SWIFT, trading}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2PEcon\%2704\%20-\%20SWIFT.pdf},
}
Tanenbaum86usingsparse
@conference{Tanenbaum86usingsparse,
  title = {Using Sparse Capabilities in a Distributed Operating System}, 
  author = {Andrew Tanenbaum and Sape J. Mullender and Robbert Van Renesse}, 
  booktitle = {Using Sparse Capabilities in a Distributed Operating System}, 
  year = {1986}, 
  pages = {558--563}, 
  abstract = {In this paper we discuss a system, Amoeba, that uses capabilities for naming and
        protecting objects. In contrast to traditional, centralized operating systems, in
        which capabilities are managed by the operating system kernel, in Amoeba all the
        capabilities are managed directly by user code. To prevent tampering, the
        capabilities are protected cryptographically. The paper describes a variety of
        the issues involved, and gives four different ways of dealing with the access
        rights}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.49.7998}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.56.3350.pdf}, 
  www_section = {Unsorted}, 
}
Tang:2007:ESE:1260204.1260647
@conference{Tang:2007:ESE:1260204.1260647,
  title = {Empirical Study on the Evolution of PlanetLab}, 
  author = {Tang, Li and Chen, Yin and Li, Fei and Zhang, Hui and Li, Jun}, 
  booktitle = {ICN'07--Proceedings of the 6th International Conference on Networking}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  month = {April}, 
  address = {Sainte-Luce, Martinique, France}, 
  pages = {0--64}, 
  publisher = {IEEE Computer Society}, 
  abstract = {PlanetLab is a globally distributed overlay platform that has been
        increasingly used by researchers to deploy and assess planetary-scale network
        services. This paper analyzes some particular advantages of PlanetLab, and then
        investigates its evolution process, geographical node-distribution, and network
        topological features. The revealed results are helpful for researchers to 1)
        understand the history of PlanetLab and some of its important properties
        quantitatively; 2) realize the dynamic of PlanetLab environment and design
        professional experiments; 3) select stable nodes that possess a high probability
        to run continuously for a long time; and 4) objectively and in depth evaluate the
        experimental results}, 
  www_section = {overlay, PlanetLab, topology}, 
  isbn = {0-7695-2805-8}, 
  doi = {10.1109/ICN.2007.40}, 
  url = {http://dl.acm.org/citation.cfm?id=1260204.1260647}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ICN\%2707\%20-\%20PlanetLab.pdf},
}
Tariq:2011:MSQ:2063320.2063330
@article{Tariq:2011:MSQ:2063320.2063330,
  title = {Meeting subscriber-defined QoS constraints in publish/subscribe systems}, 
  author = {Tariq, Muhammad Adnan and Boris Koldehofe and Gerald G. Koch and Khan, Imran
        and Kurt Rothermel}, 
  journal = {Concurrency and Computation: Practice and Experience}, 
  volume = {23}, 
  number = {17}, 
  year = {2011}, 
  address = {Chichester, UK}, 
  pages = {2140--2153}, 
  publisher = {John Wiley and Sons Ltd}, 
  www_section = {content-based, publish/subscribe, QoS}, 
  issn = {1532-0626}, 
  doi = {10.1002/cpe.1751}, 
  url = {http://dx.doi.org/10.1002/cpe.1751}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tariq2011Meeting.pdf}, 
}
Tariqetal:2009:ProbLatencyBounds
@conference{Tariqetal:2009:ProbLatencyBounds,
  title = {Providing Probabilistic Latency Bounds for Dynamic Publish/Subscribe Systems}, 
  author = {Tariq, Muhammad Adnan and Boris Koldehofe and Gerald G. Koch and Kurt
        Rothermel}, 
  booktitle = {Kommunikation in Verteilten Systemen (KiVS)}, 
  organization = {Gesellschaft fuer Informatik (GI)}, 
  year = {2009}, 
  pages = {155--166}, 
  publisher = {Gesellschaft fuer Informatik (GI)}, 
  abstract = {In the context of large decentralized many-to-many communication systems it
        is impractical to provide realistic and hard bounds for certain QoS metrics
        including latency bounds. Nevertheless, many applications can yield better
        performance if such bounds hold with a given probability. In this paper we show
        how probabilistic latency bounds can be applied in the context of
        publish/subscribe. We present an algorithm for maintaining individual
        probabilistic latency bounds in a highly dynamic environment for a large number
        of subscribers. The algorithm consists of an adaptive dissemination algorithm as
        well as a cluster partitioning scheme. Together they ensure i) adaptation to the
        individual latency requirements of subscribers under dynamically changing system
        properties, and ii) scalability by determining appropriate clusters according to
        available publishers in the system}, 
  www_section = {publish/subscribe, QoS}, 
  isbn = {978-3-540-92666-5}, 
  doi = {10.1007/978-3-540-92666-5}, 
  url = {http://www.springerlink.com/content/x36578745jv7wr88/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/QoS_pubsub.pdf}, 
}
Tati06onobject
@conference{Tati06onobject,
  title = {On Object Maintenance in Peer-to-Peer Systems}, 
  author = {Kiran Tati and Geoffrey M. Voelker}, 
  booktitle = {IPTPS'06--Proceedings of the 5th International Workshop on Peer-to-Peer
        Systems}, 
  year = {2006}, 
  month = {February}, 
  address = {Santa Barbara, CA, USA}, 
  abstract = {In this paper, we revisit object maintenance in peer-to-peer systems, focusing
        on how temporary and permanent churn impact the overheads associated with object
        maintenance. We have a number of goals: to highlight how different environments
        exhibit different degrees of temporary and permanent churn; to provide further
        insight into how churn in different environments affects the tuning of object
        maintenance strategies; and to examine how object maintenance and churn interact
        with other constraints such as storage capacity. When possible, we highlight
        behavior independent of particular object maintenance strategies. When an issue
        depends on a particular strategy, though, we explore it in the context of a
        strategy in essence similar to TotalRecall, which uses erasure coding, lazy
        repair of data blocks, and random indirect placement (we also assume that repairs
        incorporate remaining blocks rather than regenerating redundancy from scratch)}, 
  www_section = {churn, P2P, peer-to-peer networking}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2706\%20-\%20On\%20object\%20maintenance\%20in\%20p2p\%20systems.pdf},
}
Terpstra:2007:BRP:1282427.1282387
@article{Terpstra:2007:BRP:1282427.1282387,
  title = {Bubblestorm: resilient, probabilistic, and exhaustive peer-to-peer search}, 
  author = {Terpstra, Wesley W. and Jussi Kangasharju and Leng, Christof and Buchmann,
        Alejandro P.}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {37}, 
  year = {2007}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {49--60}, 
  publisher = {ACM}, 
  abstract = {Peer-to-peer systems promise inexpensive scalability, adaptability, and
        robustness. Thus, they are an attractive platform for file sharing, distributed
        wikis, and search engines. These applications often store weakly structured data,
        requiring sophisticated search algorithms. To simplify the search problem, most
        scalable algorithms introduce structure to the network. However, churn or violent
        disruption may break this structure, compromising search guarantees. This paper
        proposes a simple probabilistic search system, BubbleStorm, built on random
        multigraphs. Our primary contribution is a flexible and reliable strategy for
        performing exhaustive search. BubbleStorm also exploits the heterogeneous
        bandwidth of peers. However, we sacrifice some of this bandwidth for high
        parallelism and low latency. The provided search guarantees are tunable, with
        success probability adjustable well into the realm of reliable systems. For
        validation, we simulate a network with one million low-end peers and show
        BubbleStorm handles up to 90\% simultaneous peer departure and 50\% simultaneous
        crash}, 
  www_section = {exhaustive search, peer-to-peer networking, resilience, simulation}, 
  issn = {0146-4833}, 
  doi = {10.1145/1282427.1282387}, 
  url = {http://doi.acm.org/10.1145/1282427.1282387}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Computers\%20Communication\%20Review\%20-\%20Bubblestorm.pdf},
}
Thomas:2002:MAO:767821.769444
@article{Thomas:2002:MAO:767821.769444,
  title = {A Market-Based Approach to Optimal Resource Allocation in Integrated-Services
        Connection-Oriented Networks}, 
  author = {Thomas, Panagiotis and Teneketzis, Demosthenis and MacKie-Mason, Jeffrey K.}, 
  journal = {Operations Research}, 
  volume = {50}, 
  number = {4}, 
  year = {2002}, 
  month = {July}, 
  address = {Institute for Operations Research and the Management Sciences (INFORMS),
        Linthicum, Maryland, USA}, 
  pages = {603--616}, 
  publisher = {INFORMS}, 
  abstract = {We present an approach to the admission control and resource allocation
        problem in connection-oriented networks that offer multiple services to users.
        Users' preferences are summarized by means of their utility functions, and each
        user is allowed to request more than one type of service. Multiple types of
        resources are allocated at each link along the path of a connection. We assume
        that the relation between Quality of Service (QoS) and resource allocation is
        given, and we incorporate it as a constraint into a static optimization problem.
        The objective of the optimization problem is to determine the amount of and
        required resources for each type of service to maximize the sum of the users'
        utilities. We prove the existence of a solution of the optimization problem and
        describe a competitive market economy that implements the solution and satisfies
        the informational constraints imposed by the nature of the decentralized resource
        allocation problem. The economy consists of four different types of agents:
        resource providers, service providers, users, and an auctioneer that regulates
        the prices based on the observed aggregate excess demand. The goods that are sold
        are: (i) the resources at each link of the network, and (ii) services constructed
        from these resources and then delivered to users. We specify an iterative
        procedure that is used by the auctioneer to update the prices, and we show that
        it leads to an allocation that is arbitrarily close to a solution of the
        optimization problem in a finite number of iterations}, 
  www_section = {algorithms, economics, integrated-services networks, network, nonlinear,
        pricing schemes, programming, resource allocation}, 
  issn = {0030-364X}, 
  doi = {10.1287/opre.50.4.603.2862}, 
  url = {http://dx.doi.org/10.1287/opre.50.4.603.2862}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Oper.\%20Res.\%20-\%20Optimal\%20Resource\%20Allocation.pdf},
}
Tolia03opportunisticuse
@conference{Tolia03opportunisticuse,
  title = {Opportunistic Use of Content Addressable Storage for Distributed File Systems}, 
  author = {Niraj Tolia and Michael Kozuch and Satyanarayanan, Mahadev and Brad Karp and
        Thomas Bressoud and Adrian Perrig}, 
  booktitle = {In Proceedings of the 2003 USENIX Annual Technical Conference}, 
  year = {2003}, 
  pages = {127--140}, 
  abstract = {Motivated by the prospect of readily available Content Addressable Storage
        (CAS), we introduce the concept of file recipes. A file's recipe is a first-class
        file system object listing content hashes that describe the data blocks composing
        the file. File recipes provide applications with instructions for reconstructing
        the original file from available CAS data blocks. We describe one such
        application of recipes, the CASPER distributed file system. A CASPER client
        opportunistically fetches blocks from nearby CAS providers to improve its
        performance when the connection to a file server traverses a low-bandwidth path.
        We use measurements of our prototype to evaluate its performance under varying
        network conditions. Our results demonstrate significant improvements in execution
        times of applications that use a network file system. We conclude by describing
        fuzzy block matching, a promising technique for using approximately matching
        blocks on CAS providers to reconstitute the exact desired contents of a file at a
        client}, 
  www_section = {file systems, storage}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.740}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/casper-usenix2003.pdf}, 
}
TrafHTTP
@conference{TrafHTTP,
  title = {Statistical Identification of Encrypted Web Browsing Traffic}, 
  author = {Qixiang Sun and Daniel R. Simon and Yi-Min Wang and Wilf Russell and Venkata N.
        Padmanabhan and Lili Qiu}, 
  booktitle = {Proceedings of the 2002 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2002}, 
  month = {May}, 
  address = {Berkeley, California}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {Encryption is often proposed as a tool for protecting the privacy of World
        Wide Web browsing. However, encryption--particularly as typically implemented in,
        or in concert with popular Web browsers--does not hide all information about the
        encrypted plaintext. Specifically, HTTP object count and sizes are often revealed
        (or at least incompletely concealed). We investigate the identifiability of World
        Wide Web traffic based on this unconcealed information in a large sample of Web
        pages, and show that it suffices to identify a significant fraction of them quite
        reliably. We also suggest some possible countermeasures against the exposure of
        this kind of information and experimentally evaluate their effectiveness}, 
  www_section = {encryption, privacy}, 
  isbn = {0-7695-1543-6}, 
  url = {http://portal.acm.org/citation.cfm?id=830535}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tr-2002-23.pdf}, 
}
UCAM-CL-TR-637
@booklet{UCAM-CL-TR-637,
  title = {The Topology of Covert Conflict}, 
  author = {Shishir Nagaraja and Ross Anderson}, 
  number = {UCAM-CL-TR-637}, 
  year = {2005}, 
  month = {July}, 
  publisher = {University of Cambridge Computer Laboratory}, 
  abstract = {This is a short talk on topology of covert conflict, comprising joint work
        I've been doing with Ross Anderson. The background of this work is the following.
        We consider a conflict, and there are parties to the conflict. There is
        communication going on that can be abstracted as a network of nodes (parties) and
        links (social ties between the nodes). We contend that once you've got a conflict
        and you've got enough parties to it, these guys start communicating as a result
        of the conflict. They form connections, that influences the conflict, and the
        dynamics of the conflict in turn feeds the connectivity of the unfolding network.
        Modern conflicts often turn on connectivity: consider, for instance, anything
        from the American army's attack on the Taleban in Afghanistan, and elsewhere, or
        medics who are trying to battle a disease, like Aids, or anything else. All of
        these turn on, making strategic decisions about which nodes to go after in the
        network. For instance, you could consider that a good first place to give condoms
        out and start any Aids programme, would be with prostitutes}, 
  doi = {10.1007/978-3-540-77156-2}, 
  url = {http://www.springerlink.com/content/p885q38262486876/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UCAM-CL-TR-637.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
UREbreak06
@conference{UREbreak06,
  title = {Breaking Four Mix-related Schemes Based on Universal Re-encryption}, 
  author = {George Danezis}, 
  booktitle = {Proceedings of Information Security Conference 2006}, 
  organization = {Springer-Verlag}, 
  year = {2006}, 
  month = {September}, 
  publisher = {Springer-Verlag}, 
  abstract = {Universal Re-encryption allows El-Gamal ciphertexts to be re-encrypted
        without knowledge of their corresponding public keys. This has made it an
        enticing building block for anonymous communications protocols. In this work we
        analyze four schemes related to mix networks that make use of Universal
        Re-encryption and find serious weaknesses in all of them. Universal Re-encryption
        of signatures is open to existential forgery; two-mix schemes can be fully
        compromised by a passive adversary observing a single message close to the
        sender; the fourth scheme, the rWonGoo anonymous channel, turns out to be less
        secure than the original Crowds scheme, on which it is based. Our attacks make
        extensive use of unintended {\textquotedblleft}services{\textquotedblright}
        provided by the network nodes acting as decryption and re-routing oracles.
        Finally, our attacks against rWonGoo demonstrate that anonymous channels are not
        automatically composable: using two of them in a careless manner makes the system
        more vulnerable to attack}, 
  www_section = {traffic analysis, universal re-encryption}, 
  doi = {10.1007/s10207-007-0033-y}, 
  url = {http://www.springerlink.com/content/x038u85171776236/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/UREbreak06.pdf}, 
}
VenHeTon07
@booklet{VenHeTon07,
  title = {Anonymous Networking amidst Eavesdroppers}, 
  author = {Parvathinathan Venkitasubramaniam and Ting He and Lang Tong}, 
  year = {2007}, 
  month = {October}, 
  abstract = {The problem of security against packet timing based traffic analysis in
        wireless networks is considered in this work. An analytical measure of
        "anonymity" of routes in eavesdropped networks is proposed using the
        information-theoretic equivocation. For a physical layer with orthogonal
        transmitter directed signaling, scheduling and relaying techniques are designed
        to maximize achievable network performance for any desired level of anonymity.
        The network performance is measured by the total rate of packets delivered from
        the sources to destinations under strict latency and medium access constraints.
        In particular, analytical results are presented for two scenarios: For a single
        relay that forwards packets from m users, relaying strategies are provided that
        minimize the packet drops when the source nodes and the relay generate
        independent transmission schedules. A relay using such an independent scheduling
        strategy is undetectable by an eavesdropper and is referred to as a covert relay.
        Achievable rate regions are characterized under strict and average delay
        constraints on the traffic, when schedules are independent Poisson processes. For
        a multihop network with an arbitrary anonymity requirement, the problem of
        maximizing the sum-rate of flows (network throughput) is considered. A randomized
        selection strategy to choose covert relays as a function of the routes is
        designed for this purpose. Using the analytical results for a single covert
        relay, the strategy is optimized to obtain the maximum achievable throughput as a
        function of the desired level of anonymity. In particular, the
        throughput-anonymity relation for the proposed strategy is shown to be equivalent
        to an information-theoretic rate-distortion function}, 
  www_section = {Rate-Distortion, secrecy, traffic analysis}, 
  url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=20411836}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/0710.4903v1.pdf}, 
}
Waidner:1990:DCD:111563.111630
@conference{Waidner:1990:DCD:111563.111630,
  title = {The dining cryptographers in the disco: unconditional sender and recipient
        untraceability with computationally secure serviceability}, 
  author = {Michael Waidner and Birgit Pfitzmann}, 
  booktitle = {EUROCRYPT'89--Proceedings of the workshop on the theory and application of
        cryptographic techniques on Advances in cryptology}, 
  organization = {Springer-Verlag New York, Inc}, 
  year = {1990}, 
  month = {April}, 
  address = {Houthalen, Belgium}, 
  pages = {0--690}, 
  publisher = {Springer-Verlag New York, Inc}, 
  series = {EUROCRYPT '89}, 
  abstract = {In Journal of Cryptology 1/1 (1988) 65-75 (= [Chau_88]), David Chaum
        describes a beautiful technique, the DC-net, which should allow participants to
        send and receive messages anonymously in an arbitrary network. The untraceability
        of the senders is proved to be unconditional, but that of the recipients
        implicitly assumes a reliable broadcast network. This assumption is unrealistic
        in some networks, but it can be removed completely by using the fail-stop key
        generation schemes by Waidner (these proceedings, =[Waid_89]). In both cases,
        however, each participant can untraceably and permanently disrupt the
        entire DC-net. We present a protocol which guarantees unconditional
        untraceability, the original goal of the DC-net, on the inseparability assumption
        (i.e. the attacker must be unable to prevent honest participants
        from communicating, which is considerably less than reliable broadcast), and
        computationally secure serviceability: Computationally restricted disrupters can
        be identified and removed from the DC-net. On the one hand, our solution is based
        on the lovely idea by David Chaum [Chau_88 {\textsection} 2.5] of setting traps
        for disrupters. He suggests a scheme to guarantee unconditional untraceability
        and computationally secure serviceability, too, but on the reliable broadcast
        assumption. The same scheme seems to be used by Bos and den Boer (these
        proceedings, = [BoBo_89]). We show that this scheme needs some changes and
        refinements before being secure, even on the reliable broadcast assumption. On
        the other hand, our solution is based on the idea of digital signatures whose
        forgery by an unexpectedly powerful attacker is provable, which might be of
        independent interest. We propose such a (one-time) signature scheme based on
        claw-free permutation pairs; the forgery of signatures is equivalent to finding
        claws, thus in a special case to the factoring problem. In particular, with such
        signatures we can, for the first time, realize fail-stop Byzantine Agreement, and
        also adaptive Byzantine Agreement, i.e. Byzantine Agreement which can only be
        disrupted by an attacker who controls at least a third of all participants and
        who can forge signatures. We also sketch applications of these signatures to a
        payment system, solving disputes about shared secrets, and signatures which
        cannot be shown round}, 
  www_section = {anonymity, arbitrary network, cryptology, DC-net}, 
  isbn = {3-540-53433-4}, 
  url = {http://dl.acm.org/citation.cfm?id=111563.111630}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/EUROCRYPT\%2789\%20-\%20Waidner\%26Pfitzmann\%20-\%20The\%20dining\%20cryptographers\%20in\%20the\%20disco\%20.pdf},
}
Waldman01tangler:a
@conference{Waldman01tangler:a,
  title = {Tangler: A Censorship-Resistant Publishing System Based On Document
        Entanglements}, 
  author = {Marc Waldman and David Mazi{\`e}res}, 
  booktitle = {In Proceedings of the 8th ACM Conference on Computer and Communications
        Security}, 
  year = {2001}, 
  pages = {126--135}, 
  abstract = {The basic idea is to protect documents by making it impossible to remove one
        document from the system without losing others. The underlying assumption that
        the adversary cares about collateral damage of this kind is a bit far fetched.
        Also, the entanglement doubles the amount of data that needs to be moved to
        retrieve a document}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.24.3781\&rep=rep1\&type=pdf},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tangler.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Wang05erasure-codingbased
@conference{Wang05erasure-codingbased,
  title = {Erasure-coding based routing for opportunistic networks}, 
  author = {Wang, Yong and Sushant Jain and Martonosi, Margaret and Fall, Kevin}, 
  booktitle = {Proceedings of the 2005 ACM SIGCOMM Workshop on Delay-Tolerant Networking
        (WDTN '05)}, 
  organization = {ACM Press}, 
  year = {2005}, 
  pages = {229--236}, 
  publisher = {ACM Press}, 
  abstract = {Routing in Delay Tolerant Networks (DTN) with unpredictable node mobility
        is a challenging problem because disconnections are prevalent and
        lack of knowledge about network dynamics hinders good decision making. Current
        approaches are primarily based on redundant transmissions. They have either high
        overhead due to excessive transmissions or long delays due to the possibility of
        making wrong choices when forwarding a few redundant copies. In this paper, we
        propose a novel forwarding algorithm based on the idea of erasure codes. Erasure
        coding allows use of a large number of relays while maintaining a constant
        overhead, which results in fewer cases of long delays. We use simulation to
        compare the routing performance of using erasure codes in DTN with four other
        categories of forwarding algorithms proposed in the literature. Our simulations
        are based on a real-world mobility trace collected in a large outdoor wild-life
        environment. The results show that the erasure-coding based algorithm provides
        the best worst-case delay performance with a fixed amount of overhead. We also
        present a simple analytical model to capture the delay characteristics of
        erasure-coding based forwarding, which provides insights on the potential of our
        approach}, 
  www_section = {delay tolerant network, routing}, 
  doi = {10.1145/1080139.1080140}, 
  url = {http://doi.acm.org/10.1145/1080139.1080140}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.79.364.pdf}, 
}
Wang05findingcollisions
@conference{Wang05findingcollisions,
  title = {Finding Collisions in the Full SHA-1}, 
  author = {Xiaoyun Wang and Yiqun Lisa Yin and Hongbo Yu}, 
  booktitle = {In Proceedings of Crypto}, 
  organization = {Springer}, 
  year = {2005}, 
  pages = {17--36}, 
  publisher = {Springer}, 
  abstract = {In this paper, we present new collision search attacks on the hash function
        SHA-1. We show that collisions of SHA-1 can be found with complexity less than
        $2^{69}$ hash operations. This is the first attack on the full 80-step SHA-1 with
        complexity less than the $2^{80}$ theoretical bound. Keywords: Hash functions,
        collision search attacks, SHA-1, SHA-0}, 
  www_section = {cryptography}, 
  isbn = {978-3-540-28114-6}, 
  doi = {10.1007/11535218}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.94.4261}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SHA1AttackProceedingVersion.pdf},
}
Wang:2008:GAI:1412757.1412971
@article{Wang:2008:GAI:1412757.1412971,
  title = {A game-theoretic analysis of the implications of overlay network traffic on ISP
        peering}, 
  author = {Wang, Jessie Hui and Chiu, Dah Ming and Lui, John C. S.}, 
  journal = {Computer Networks}, 
  volume = {52}, 
  year = {2008}, 
  month = {October}, 
  address = {New York, NY, USA}, 
  pages = {2961--2974}, 
  publisher = {Elsevier North-Holland, Inc}, 
  abstract = {Inter-ISP traffic flow determines the settlement between ISPs and affects the
        perceived performance of ISP services. In today's Internet, the inter-ISP traffic
        flow patterns are controlled not only by ISPs' policy-based routing configuration
        and traffic engineering, but also by application layer routing. The goal of this
        paper is to study the economic implications of this shift in Internet traffic
        control assuming rational ISPs and subscribers. For this purpose, we build a
        general traffic model that predicts traffic patterns based on subscriber
        distribution and abstract traffic controls such as caching functions and
        performance sensitivity functions. We also build a game-theoretic model of
        subscribers picking ISPs, and ISPs making provisioning and peering decisions. In
        particular, we apply this to a local market where two ISPs compete for market
        share of subscribers under two traffic patterns: ''Web'' and ''P2P overlay'',
        that typifies the transition the current Internet is going through. Our
        methodology can be used to quantitatively demonstrate that (1) while economy of
        scale is the predominant property of the competitive ISP market, P2P traffic may
        introduce unfair distribution of peering benefit (i.e. free-riding); (2) the
        large ISP can restore more fairness by reducing its private capacity (bandwidth
        throttling), which has the drawback of hurting business growth; and (3) ISPs can
        reduce the level of peering (e.g. by reducing peering bandwidth) to restore more
        fairness, but this has the side-effect of also reducing the ISPs' collective
        bargaining power towards subscribers}, 
  www_section = {game theory, isp, Network management, Peering, Traffic model}, 
  issn = {1389-1286}, 
  doi = {10.1016/j.comnet.2008.06.014}, 
  url = {http://dl.acm.org/citation.cfm?id=1412757.1412971}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Computer\%20Networks\%20-\%20Wang\%2C\%20Chiu\%20\%26\%20Lui\%20-\%20Overlay\%20network\%20traffic\%20on\%20ISP\%20peering.pdf},
}
WangCJ05
@conference{WangCJ05,
  title = {Tracking anonymous peer-to-peer VoIP calls on the internet}, 
  author = {Xinyuan Wang and Shiping Chen and Sushil Jajodia}, 
  booktitle = {Proceedings of the ACM Conference on Computer and Communications Security}, 
  organization = {ACM New York, NY, USA}, 
  year = {2005}, 
  month = {November}, 
  pages = {81--91}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Peer-to-peer VoIP calls are becoming increasingly popular due to their
        advantages in cost and convenience. When these calls are encrypted from end to
        end and anonymized by low latency anonymizing network, they are considered by
        many people to be both secure and anonymous.In this paper, we present a watermark
        technique that could be used for effectively identifying and correlating
        encrypted, peer-to-peer VoIP calls even if they are anonymized by low latency
        anonymizing networks. This result is in contrast to many people's perception. The
        key idea is to embed a unique watermark into the encrypted VoIP flow by slightly
        adjusting the timing of selected packets. Our analysis shows that it only takes
        several milliseconds time adjustment to make normal VoIP flows highly unique and
        the embedded watermark could be preserved across the low latency anonymizing
        network if appropriate redundancy is applied. Our analytical results are backed
        up by the real-time experiments performed on leading peer-to-peer VoIP client and
        on a commercially deployed anonymizing network. Our results demonstrate that (1)
        tracking anonymous peer-to-peer VoIP calls on the Internet is feasible and (2)
        low latency anonymizing networks are susceptible to timing attacks}, 
  www_section = {anonymity, P2P}, 
  isbn = {1-59593-226-7}, 
  doi = {10.1145/1102120.1102133}, 
  url = {http://portal.acm.org/citation.cfm?id=1102120.1102133}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WangCJ05.pdf}, 
}
Widmer_abstractnetwork
@booklet{Widmer_abstractnetwork,
  title = {Network Coding for Efficient Communication in Extreme Networks}, 
  author = {J{\"o}rg Widmer}, 
  year = {2005}, 
  abstract = {Some forms of ad-hoc networks need to operate in extremely
        performance-challenged environments where end-to-end connectivity is rare. Such
        environments can be found for example in very sparse mobile networks where nodes
        {\textquotedblleft}meet{\textquotedblright} only occasionally and are able to
        exchange information, or in wireless sensor networks where nodes sleep most of
        the time to conserve energy. Forwarding mechanisms in such networks usually
        resort to some form of intelligent flooding, as for example in probabilistic
        routing. We propose a communication algorithm that significantly reduces the
        overhead of probabilistic routing algorithms, making it a suitable building block
        for a delay-tolerant network architecture. Our forwarding scheme is based on
        network coding. Nodes do not simply forward packets they overhear but may send
        out information that is coded over the contents of several packets they received.
        We show by simulation that this algorithm achieves the reliability and robustness
        of flooding at a small fraction of the overhead}, 
  www_section = {ad-hoc networks, delay tolerant network, routing}, 
  isbn = {1-59593-026-4}, 
  doi = {10.1145/1080139.1080147}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.5368}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.102.5368.pdf}, 
}
WongSirer2008ApproximateMatching
@booklet{WongSirer2008ApproximateMatching,
  title = {Approximate Matching for Peer-to-Peer Overlays with Cubit}, 
  author = {Bernard Wong and Aleksandrs Slivkins and Emin G{\"u}n Sirer}, 
  year = {2008}, 
  publisher = {Cornell University, Computing and Information Science Technical Report}, 
  abstract = {Keyword search is a critical component in most content retrieval systems.
        Despite the emergence of completely decentralized and efficient peer-to-peer
        techniques for content distribution, there have not been similarly efficient,
        accurate, and decentralized mechanisms for content discovery based on
        approximate search keys. In this paper, we present a scalable and efficient
        peer-to-peer system called Cubit with a new search primitive that can
        efficiently find the k data items with keys most similar to a given search key.
        The system works by creating a keyword metric space that encompasses both the
        nodes and the objects in the system, where the distance between two points is a
        measure of the similarity between the strings that the points represent. It
        provides a loosely-structured overlay that can efficiently navigate this space.
        We evaluate Cubit through both a real deployment as a search plugin for a
        popular BitTorrent client and a large-scale simulation and show that it provides
        an efficient, accurate and robust method to handle imprecise string search in
        file sharing applications}, 
  www_section = {distributed hash table, p2psockets}, 
}
Wright01ananalysis
@conference{Wright01ananalysis,
  title = {An Analysis of the Degradation of Anonymous Protocols}, 
  author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}, 
  booktitle = {Network and Distributed System Security Symposium}, 
  year = {2001}, 
  address = {San Diego, California}, 
  abstract = {There have been a number of protocols proposed for anonymous network
        communication. In this paper we prove that when a particular initiator continues
        communication with a particular responder across path reformations, existing
        protocols are subject to attacks by corrupt group members that degrade the
        anonymity of each protocol over time. We use this result to place an upper bound
        on how long existing protocols including Crowds, Onion Routing, Hordes, and
        DC-Net, can maintain anonymity in the face of the attacks described. Our results
        show that fully-connected DC-Net is the most resilient to these attacks, but is
        subject to simple denial-of-service attacks. Additionally, we show how a variant
        of the attack allows attackers to setup other participants to falsely appear to
        be the initiator of a connection}, 
  url = {http://freehaven.net/anonbib/cache/wright02.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright02.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Wright:2004
@article{Wright:2004,
  title = {The Predecessor Attack: An Analysis of a Threat to Anonymous Communications
        Systems}, 
  author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}, 
  journal = {ACM Transactions on Information and System Security (TISSEC)}, 
  volume = {7}, 
  number = {4}, 
  year = {2004}, 
  month = {November}, 
  pages = {489--522}, 
  abstract = {There have been a number of protocols proposed for anonymous network
        communication. In this paper, we investigate attacks by corrupt group members
        that degrade the anonymity of each protocol over time. We prove that when a
        particular initiator continues communication with a particular responder across
        path reformations, existing protocols are subject to the attack. We use this
        result to place an upper bound on how long existing protocols, including Crowds,
        Onion Routing, Hordes, Web Mixes, and DC-Net, can maintain anonymity in the face
        of the attacks described. This provides a basis for comparing these protocols
        against each other. Our results show that fully connected DC-Net is the most
        resilient to these attacks, but it suffers from scalability issues that keep
        anonymity group sizes small. We also show through simulation that the underlying
        topography of the DC-Net affects the resilience of the protocol: as the number of
        neighbors a node has increases the strength of the protocol increases, at the
        cost of higher communication overhead}, 
  www_section = {anonymity, predecessor attack, privacy}, 
  issn = {1094-9224}, 
  doi = {10.1145/1042031.1042032}, 
  url = {http://portal.acm.org/citation.cfm?id=1042031.1042032\&coll=GUIDE\&dl=GUIDE\&CFID=76057600\&CFTOKEN=15386893},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Wright-2004.pdf}, 
}
WrightMM06
@article{WrightMM06,
  title = {On Inferring Application Protocol Behaviors in Encrypted Network Traffic}, 
  author = {Charles Wright and Fabian Monrose and Gerald M. Masson}, 
  journal = {Journal of Machine Learning Research}, 
  volume = {7}, 
  year = {2006}, 
  address = {Cambridge, MA, USA}, 
  pages = {2745--2769}, 
  publisher = {MIT Press}, 
  abstract = {Several fundamental security mechanisms for restricting access to network
        resources rely on the ability of a reference monitor to inspect the contents of
        traffic as it traverses the network. However, with the increasing popularity of
        cryptographic protocols, the traditional means of inspecting packet contents to
        enforce security policies is no longer a viable approach as message contents are
        concealed by encryption. In this paper, we investigate the extent to which common
        application protocols can be identified using only the features that remain
        intact after encryption---namely packet size, timing, and direction. We first
        present what we believe to be the first exploratory look at protocol
        identification in encrypted tunnels which carry traffic from many TCP connections
        simultaneously, using only post-encryption observable features. We then explore
        the problem of protocol identification in individual encrypted TCP connections,
        using much less data than in other recent approaches. The results of our
        evaluation show that our classifiers achieve accuracy greater than 90\% for
        several protocols in aggregate traffic, and, for most protocols, greater than
        80\% when making fine-grained classifications on single connections. Moreover,
        perhaps most surprisingly, we show that one can even estimate the number of live
        connections in certain classes of encrypted tunnels to within, on average, better
        than 20\%}, 
  www_section = {hidden Markov models, traffic classification}, 
  issn = {1533-7928}, 
  url = {http://portal.acm.org/citation.cfm?id=1248647}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WrightMM06.pdf}, 
}
Xie:2008:PPP:1402946.1402999
@article{Xie:2008:PPP:1402946.1402999,
  title = {P4P: Provider Portal for Applications}, 
  author = {Xie, Haiyong and Yang, Y. Richard and Krishnamurthy, Arvind and Liu, Yanbin
        Grace and Silberschatz, Abraham}, 
  journal = {SIGCOMM Computer Communication Review}, 
  volume = {38}, 
  year = {2008}, 
  month = {August}, 
  address = {New York, NY, USA}, 
  pages = {351--362}, 
  publisher = {ACM}, 
  abstract = {As peer-to-peer (P2P) emerges as a major paradigm for scalable network
        application design, it also exposes significant new challenges in achieving
        efficient and fair utilization of Internet network resources. Being largely
        network-oblivious, many P2P applications may lead to inefficient network resource
        usage and/or low application performance. In this paper, we propose a simple
        architecture called P4P to allow for more effective cooperative traffic control
        between applications and network providers. We conducted extensive simulations
        and real-life experiments on the Internet to demonstrate the feasibility and
        effectiveness of P4P. Our experiments demonstrated that P4P either improves or
        maintains the same level of application performance of native P2P applications,
        while, at the same time, it substantially reduces network provider cost compared
        with either native or latency-based localized P2P applications}, 
  www_section = {network application, network architecture, P2P}, 
  issn = {0146-4833}, 
  doi = {10.1145/1402946.1402999}, 
  url = {http://doi.acm.org/10.1145/1402946.1402999}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%20Comput.\%20Commun.\%20Rev.\%20-\%20P4P\%3A\%20Provider\%20Portal\%20for\%20Applications.pdf},
}
Xu98lowdensity
@article{Xu98lowdensity,
  title = {Low Density MDS Codes and Factors of Complete Graphs}, 
  author = {Lihao Xu and Vasken Bohossian and Jehoshua Bruck and David Wagner}, 
  journal = {IEEE Trans. on Information Theory}, 
  volume = {45}, 
  year = {1998}, 
  pages = {1817--1826}, 
  abstract = {We reveal an equivalence relation between the construction of a new class of
        low density MDS array codes, that we call B-Code, and a combinatorial problem
        known as perfect one-factorization of complete graphs. We use known perfect
        one-factors of complete graphs to create constructions and decoding algorithms
        for both B-Code and its dual code. B-Code and its dual are optimal in the sense
        that (i) they are MDS, (ii) they have an optimal encoding property, i.e., the
        number of the parity bits that are affected by change of a single information bit
        is minimal and (iii) they have optimal length. The existence of perfect
        one-factorizations for every complete graph with an even number of nodes is a 35
        years long conjecture in graph theory. The construction of B-codes of arbitrary
        odd length will provide an affirmative answer to the conjecture}, 
  www_section = {array codes, low density, MDS Codes, update complexity}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.8899}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.42.8899.pdf}, 
}
XuFZBCZ05
@conference{XuFZBCZ05,
  title = {SAS: A Scalar Anonymous Communication System}, 
  author = {Hongyun Xu and Xinwen Fu and Ye Zhu and Riccardo Bettati and Jianer Chen and
        Wei Zhao}, 
  booktitle = {Proceedings of ICCNMC}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  pages = {452--461}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Anonymity technologies have gained more and more attention for communication
        privacy. In general, users obtain anonymity at a certain cost in an anonymous
        communication system, which uses rerouting to increase the system's robustness.
        However, a long rerouting path incurs large overhead and decreases the quality of
        service (QoS). In this paper, we propose the Scalar Anonymity System (SAS) in
        order to provide a tradeoff between anonymity and cost for different users with
        different requirements. In SAS, by selecting the level of anonymity, a user
        obtains the corresponding anonymity and QoS and also sustains the corresponding
        load of traffic rerouting for other users. Our theoretical analysis and
        simulation experiments verify the effectiveness of SAS}, 
  www_section = {anonymity, privacy, QoS}, 
  isbn = {978-3-540-28102-3}, 
  doi = {10.1007/11534310}, 
  url = {http://www.springerlink.com/content/9b2k6u5wval6cep1/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.66.7970.pdf}, 
}
Yang:2008:ABD:1403027.1403032
@conference{Yang:2008:ABD:1403027.1403032,
  title = {Auction, but don't block}, 
  author = {Yang, Xiaowei}, 
  booktitle = {NetEcon'08. Proceedings of the 3rd International Workshop on Economics of
        Networked Systems}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {August}, 
  address = {Seattle, WA, USA}, 
  pages = {19--24}, 
  publisher = {ACM}, 
  series = {NetEcon '08}, 
  abstract = {This paper argues that ISP's recent actions to block certain applications
        (e.g. BitTorrent) and attempts to differentiate traffic could be a signal of
        bandwidth scarcity. Bandwidth-intensive applications such as VoD could have
        driven the traffic demand to the capacity limit of their networks. This paper
        proposes to let ISPs auction their bandwidth, instead of blocking or degrading
        applications. A user places a bid in a packet header based on how much he values
        the communication. When congestion occurs, ISPs allocate bandwidth to those users
        that value their packets the most, and charge them the Vickrey auction price. We
        outline a design that addresses the technical challenges to support this auction
        and analyze its feasibility. Our analysis suggests that the design have
        reasonable overhead and could be feasible with modern hardware}, 
  www_section = {auction, Internet, net-neutrality}, 
  isbn = {978-1-60558-179-8}, 
  doi = {10.1145/1403027.1403032}, 
  url = {http://doi.acm.org/10.1145/1403027.1403032}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NetEcon\%2708\%20-\%20Yang\%20-\%20Auction\%2C\%20but\%20don\%27t\%20block.pdf},
}
Yang:2011:USN:2068816.2068841
@conference{Yang:2011:USN:2068816.2068841,
  title = {Uncovering social network sybils in the wild}, 
  author = {Yang, Zhi and Wilson, Christo and Wang, Xiao and Gao, Tingting and Ben Y. Zhao
        and Dai, Yafei}, 
  booktitle = {Proceedings of the 2011 ACM SIGCOMM conference on Internet measurement
        conference}, 
  organization = {ACM}, 
  year = {2011}, 
  month = {November}, 
  address = {Berlin, Germany}, 
  pages = {259--268}, 
  publisher = {ACM}, 
  series = {IMC '11}, 
  abstract = {Sybil accounts are fake identities created to unfairly increase the power or
        resources of a single user. Researchers have long known about the existence of
        Sybil accounts in online communities such as file-sharing systems, but have not
        been able to perform large scale measurements to detect them or measure their
        activities. In this paper, we describe our efforts to detect, characterize and
        understand Sybil account activity in the Renren online social network (OSN). We
        use ground truth provided by Renren Inc. to build measurement based Sybil account
        detectors, and deploy them on Renren to detect over 100,000 Sybil accounts. We
        study these Sybil accounts, as well as an additional 560,000 Sybil accounts
        caught by Renren, and analyze their link creation behavior. Most interestingly,
        we find that contrary to prior conjecture, Sybil accounts in OSNs do not form
        tight-knit communities. Instead, they integrate into the social graph just like
        normal users. Using link creation timestamps, we verify that the large majority
        of links between Sybil accounts are created accidentally, unbeknownst to the
        attacker. Overall, only a very small portion of Sybil accounts are connected to
        other Sybils with social links. Our study shows that existing Sybil defenses are
        unlikely to succeed in today's OSNs, and we must design new techniques to
        effectively detect and defend against Sybil attacks}, 
  www_section = {online social networks, sybil, sybil accounts}, 
  isbn = {978-1-4503-1013-0}, 
  doi = {10.1145/2068816.2068841}, 
  url = {http://doi.acm.org/10.1145/2068816.2068841}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2711\%20-\%20Uncovering\%20social\%20network\%20sybils.pdf},
}
Yeoh:2008:BAB:1402298.1402307
@article{Yeoh:2008:BAB:1402298.1402307,
  title = {BnB-ADOPT: an asynchronous branch-and-bound DCOP algorithm}, 
  author = {Yeoh, William and Felner, Ariel and Koenig, Sven}, 
  journal = {Journal of Artificial Intelligence Research}, 
  volume = {38}, 
  year = {2010}, 
  address = {Richland, SC}, 
  pages = {85--133}, 
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, 
  abstract = {Distributed constraint optimization (DCOP) problems are a popular way of
        formulating and solving agent-coordination problems. It is often desirable to
        solve DCOP problems optimally with memory-bounded and asynchronous algorithms. We
        introduce Branch-and-Bound ADOPT (BnB-ADOPT), a memory-bounded asynchronous DCOP
        algorithm that uses the message passing and communication framework of ADOPT, a
        well known memory-bounded asynchronous DCOP algorithm, but changes the search
        strategy of ADOPT from best-first search to depth-first branch-and-bound search.
        Our experimental results show that BnB-ADOPT is up to one order of magnitude
        faster than ADOPT on a variety of large DCOP problems and faster than NCBB, a
        memory-bounded synchronous DCOP algorithm, on most of these DCOP problems}, 
  www_section = {agent cooperation, BnB-ADOPT, DCOP, distributed constraint optimization,
        distributed problem solving}, 
  issn = {1076-9757}, 
  doi = {10.1613/jair.2849}, 
  url = {http://www.jair.org/papers/paper2849.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Journal\%20of\%20AI\%20-\%20BnB-ADOPT.pdf},
}
Yokoo91distributedconstraint
@techreport{Yokoo91distributedconstraint,
  title = {Distributed Constraint Optimization as a Formal Model of Partially Adversarial
        Cooperation}, 
  author = {Makoto Yokoo and Edmund H. Durfee}, 
  institution = {University of Michigan}, 
  number = {CSE-TR-101-91}, 
  year = {1991}, 
  address = {Ann Arbor, MI, United States}, 
  type = {Tech report}, 
  abstract = {In this paper, we argue that partially adversarial and partially cooperative
        (PARC) problems in distributed artificial intelligence can be mapped into a
        formalism called distributed constraint optimization problems (DCOPs), which
        generalize distributed constraint satisfaction problems [Yokoo, et al. 90] by
        introducing weak constraints (preferences). We discuss several solution criteria
        for DCOP and clarify the relation between these criteria and different levels of
        agent rationality [Rosenschein and Genesereth 85], and show the algorithms for
        solving DCOPs in which agents incrementally exchange only necessary information
        to converge on a mutually satisfiable solution}, 
  www_section = {artificial intelligence, DCOP, PARC, partially adversarial cooperation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Tech\%20report\%20-\%20DCOP\%20as\%20a\%20formal\%20model\%20of\%20PARC.pdf},
  url = {https://bibliography.gnunet.org}, 
}
You04evaluationof
@booklet{You04evaluationof,
  title = {Evaluation of Efficient Archival Storage Techniques}, 
  author = {Lawrence L. You and Christos Karamanolis}, 
  year = {2004}, 
  abstract = {The ever-increasing volume of archival data that need to be retained for long
        periods of time has motivated the design of low-cost, high-efficiency storage
        systems. Inter-file compression has been proposed as a technique to improve
        storage efficiency by exploiting the high degree of similarity among archival
        data. We evaluate the two main inter-file compression techniques, data chunking
        and delta encoding, and compare them with traditional intra-file compression. We
        report on experimental results from a range of representative archival data
        sets}, 
  www_section = {compression, storage}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.1341}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.11.1341.pdf}, 
}
You05deepstore:
@conference{You05deepstore:,
  title = {Deep Store: An archival storage system architecture}, 
  author = {Lawrence L. You and Kristal T. Pollack and Darrell D. E. Long}, 
  booktitle = {In Proceedings of the 21st International Conference on Data Engineering
        (ICDE'05)}, 
  organization = {IEEE}, 
  year = {2005}, 
  pages = {804--815}, 
  publisher = {IEEE}, 
  abstract = {We present the Deep Store archival storage architecture, a large-scale
        storage system that stores immutable data efficiently and reliably for long
        periods of time. Archived data is stored across a cluster of nodes and recorded
        to hard disk. The design differentiates itself from traditional file systems by
        eliminating redundancy within and across files, distributing content for
        scalability, associating rich metadata with content, and using variable levels of
        replication based on the importance or degree of dependency of each piece of
        stored data. We evaluate the foundations of our design, including PRESIDIO, a
        virtual content-addressable storage framework with multiple methods for
        inter-file and intra-file compression that effectively addresses the
        data-dependent variability of data compression. We measure content and metadata
        storage efficiency, demonstrate the need for a variable-degree replication model,
        and provide preliminary results for storage performance}, 
  www_section = {storage}, 
  isbn = {0-7695-2285-8}, 
  doi = {10.1109/ICDE.2005.47}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.66.6928}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.76.5241.pdf}, 
}
Yu04leopard:a
@booklet{Yu04leopard:a,
  title = {Leopard: A locality-aware peer-to-peer system with no hot spot}, 
  author = {Yinzhe Yu and Sanghwan Lee and Zhi-li Zhang}, 
  year = {2004}, 
  howpublished = {In: the 4th IFIP Networking Conference (Networking'05)}, 
  abstract = {A fundamental challenge in Peer-To-Peer (P2P) systems is how to locate
        objects of interest, namely, the look-up service problem. A key break-through
        towards a scalable and distributed solution of this problem is the distributed
        hash}, 
  www_section = {distributed hash table, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.134.3912}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM05_Poster.pdf}, 
}
Yu:2006:SDA:1159913.1159945
@conference{Yu:2006:SDA:1159913.1159945,
  title = {SybilGuard: defending against sybil attacks via social networks}, 
  author = {Yu, Haifeng and Kaminsky, Michael and Gibbons, Phillip B. and Flaxman,
        Abraham}, 
  booktitle = {SIGCOMM'06. Proceedings of the 2006 conference on Applications,
        Technologies, Architectures, and Protocols for Computer Communications}, 
  organization = {ACM}, 
  year = {2006}, 
  month = {September}, 
  address = {Pisa, Italy}, 
  pages = {267--278}, 
  publisher = {ACM}, 
  series = {SIGCOMM '06}, 
  abstract = {Peer-to-peer and other decentralized, distributed systems are known to be
        particularly vulnerable to sybil attacks. In a sybil attack, a malicious user
        obtains multiple fake identities and pretends to be multiple, distinct nodes in
        the system. By controlling a large fraction of the nodes in the system, the
        malicious user is able to "out vote" the honest users in collaborative tasks such
        as Byzantine failure defenses. This paper presents SybilGuard, a novel protocol
        for limiting the corruptive influences of sybil attacks. Our protocol is based on
        the "social network" among user identities, where an edge between two identities
        indicates a human-established trust relationship. Malicious users can create many
        identities but few trust relationships. Thus, there is a disproportionately-small
        "cut" in the graph between the sybil nodes and the honest nodes. SybilGuard
        exploits this property to bound the number of identities a malicious user can
        create. We show the effectiveness of SybilGuard both analytically and
        experimentally}, 
  www_section = {social networks, Sybil attack, sybilGuard}, 
  isbn = {1-59593-308-5}, 
  doi = {10.1145/1159913.1159945}, 
  url = {http://doi.acm.org/10.1145/1159913.1159945}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/SIGCOMM\%2706\%20-\%20SybilGuard.pdf},
}
Zhao01tapestry:an
@booklet{Zhao01tapestry:an,
  title = {Tapestry: An Infrastructure for Fault-tolerant Wide-area Location and Routing}, 
  author = {Ben Y. Zhao and John Kubiatowicz and Anthony D. Joseph}, 
  year = {2001}, 
  abstract = {In today's chaotic network, data and services are mobile and replicated
        widely for availability, durability, and locality. Components within this
        infrastructure interact in rich and complex ways, greatly stressing traditional
        approaches to name service and routing. This paper explores an alternative to
        traditional approaches called Tapestry. Tapestry is an overlay location and
        routing infrastructure that provides location-independent routing of messages
        directly to the closest copy of an object or service using only point-to-point
        links and without centralized resources. The routing and directory information
        within this infrastructure is purely soft state and easily repaired. Tapestry is
        self-administering, fault-tolerant, and resilient under load. This paper
        presents the architecture and algorithms of Tapestry and explores their
        advantages through a number of experiments}, 
  url = {http://portal.acm.org/citation.cfm?id=894116$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSD-01-1141.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Zhuang05cashmere:resilient
@conference{Zhuang05cashmere:resilient,
  title = {Cashmere: Resilient anonymous routing}, 
  author = {Li Zhuang and Feng Zhou and Ben Y. Zhao and Antony Rowstron}, 
  booktitle = {In Proc. of NSDI}, 
  organization = {ACM/USENIX}, 
  year = {2005}, 
  publisher = {ACM/USENIX}, 
  abstract = {Anonymous routing protects user communication from identification by
        third-party observers. Existing anonymous routing layers utilize Chaum-Mixes for
        anonymity by relaying traffic through relay nodes called mixes. The source
        defines a static forwarding path through which traffic is relayed to the
        destination. The resulting path is fragile and shortlived: failure of one mix in
        the path breaks the forwarding path and results in data loss and jitter before a
        new path is constructed. In this paper, we propose Cashmere, a resilient
        anonymous routing layer built on a structured peer-to-peer overlay. Instead of
        single-node mixes, Cashmere selects regions in the overlay namespace as mixes.
        Any node in a region can act as the MIX, drastically reducing the probability of
        a mix failure. We analyze Cashmere's anonymity and measure its performance
        through simulation and measurements, and show that it maintains high anonymity
        while providing orders of magnitude improvement in resilience to network dynamics
        and node failures}, 
  url = {http://portal.acm.org/citation.cfm?id=1251203.1251225$\#$}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cashmere.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
Zivan:2008:ALS:1402821.1402895
@conference{Zivan:2008:ALS:1402821.1402895,
  title = {Anytime local search for distributed constraint optimization}, 
  author = {Zivan, Roie}, 
  booktitle = {AAMAS'08--Proceedings of the 7th international joint conference on
        Autonomous agents and multiagent systems}, 
  organization = {International Foundation for Autonomous Agents and Multiagent Systems}, 
  year = {2008}, 
  month = {May}, 
  address = {Estoril, Portugal}, 
  pages = {1449--1452}, 
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, 
  series = {AAMAS '08}, 
  abstract = {Most former studies of Distributed Constraint Optimization Problems (DisCOPs)
        search considered only complete search algorithms, which are practical only for
        relatively small problems. Distributed local search algorithms can be used for
        solving DisCOPs. However, because of the differences between the global
        evaluation of a system's state and the private evaluation of states by agents,
        agents are unaware of the global best state which is explored by the algorithm.
        Previous attempts to use local search algorithms for solving DisCOPs reported the
        state held by the system at the termination of the algorithm, which was not
        necessarily the best state explored. A general framework for implementing
        distributed local search algorithms for DisCOPs is proposed. The proposed
        framework makes use of a BFS-tree in order to accumulate the costs of the
        system's state in its different steps and to propagate the detection of a new
        best step when it is found. The resulting framework enhances local search
        algorithms for DisCOPs with the anytime property. The proposed framework does not
        require additional network load. Agents are required to hold a small (linear)
        additional space (beside the requirements of the algorithm in use). The proposed
        framework preserves privacy at a higher level than complete DisCOP algorithms
        which make use of a pseudo-tree (ADOPT, DPOP)}, 
  www_section = {algorithms, BFS-Tree, DCOP, DisCOPs, framework}, 
  isbn = {978-0-9817381-2-3}, 
  url = {http://dl.acm.org/citation.cfm?id=1402821.1402895}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/AAAI\%2708\%20-\%20Local\%20search\%20for\%20DCOP.pdf},
}
Zoels05thehybrid
@article{Zoels05thehybrid,
  title = {The Hybrid Chord Protocol: A Peer-to-peer Lookup Service for Context-Aware
        Mobile Applications}, 
  author = {Stefan Z{\"o}ls and R{\"u}diger Schollmeier and Wolfgang Kellerer and Anthony
        Tarlano}, 
  journal = {IEEE ICN, Reunion Island, April 2005. LNCS 3421}, 
  year = {2005}, 
  abstract = {A fundamental problem in Peer-to-Peer (P2P) overlay networks is how to
        efficiently find a node that shares a requested object. The Chord protocol is a
        distributed lookup protocol addressing this problem using hash keys to identify
        the nodes in the network and also the shared objects. However, when a node joins
        or leaves the Chord ring, object references have to be rearranged in order to
        maintain the hash key mapping rules. This leads to a heavy traffic load,
        especially when nodes stay in the Chord ring only for a short time. In mobile
        scenarios storage capacity, transmission data rate and battery power are limited
        resources, so the heavy traffic load generated by the shifting of object
        references can lead to severe problems when using Chord in a mobile scenario. In
        this paper, we present the Hybrid Chord Protocol (HCP). HCP solves the problem of
        frequent joins and leaves of nodes. As a further improvement of an efficient
        search, HCP supports the grouping of shared objects in interest groups. Our
        concept of using information profiles to describe shared objects allows defining
        special interest groups (context spaces) and a shared object to be available in
        multiple context spaces}, 
  www_section = {Chord, hybrid encryption, P2P}, 
  isbn = {978-3-540-25338-9}, 
  issn = {0302-9743}, 
  doi = {10.1007/b107118}, 
  url = {http://www.springerlink.com/content/pdn9ttp0bvk0f3e9/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.68.7579.pdf}, 
}
_
@booklet{_,
  title = {A DHT-based Backup System}, 
  author = {Emil Sit and Josh Cates and Russ Cox}, 
  year = {2003}, 
  abstract = {Distributed hashtables have been proposed as a way to simplify the
        construction of large-scale distributed applications(e.g.[1,6]). DHTs are
        completely decentralized systems that provide block storage on a changing
        collection of nodes spread throughout the Internet. Each block is identified by
        a unique key. DHTs spread the load of storing and serving blocks across all of the
        active nodes and keep the blocks available as nodes join and leave the system.
        This paper presents the design and implementation of a cooperative off-site
        backup system, Venti-DHash. Venti-DHash is based on a DHT infrastructure and is
        designed to support recovery of data after a disaster by keeping regular
        snapshots of filesystems distributed off-site, on peers on the Internet. Whereas
        conventional backup systems incur significant equipment costs, manual effort and
        high administrative overhead, we hope that a distributed backup system can
        alleviate these problems, making backups easy and feasible. By building this
        system on top of a DHT, the backup application inherits the properties of the
        DHT, and serves to evaluate the feasibility of using a DHT to build large scale
        applications}, 
  www_section = {backup, distributed hash table}, 
  url = {http://doc.cat-v.org/plan_9/misc/venti-dhash/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.104.8086.pdf}, 
}
_digitalfountains:
@booklet{_digitalfountains:,
  title = {Digital Fountains: A Survey and Look Forward}, 
  author = {Michael Mitzenmacher}, 
  year = {2004}, 
  abstract = {survey constructions and applications of digital fountains, an abstraction of
        erasure coding for network communication. Digital fountains effectively change
        the standard paradigm where a user receives an ordered stream of packets to one
        where a user must simply receive enough packets in order to obtain the desired
        data. Obviating the need for ordered data simplifies data delivery, especially
        when the data is large or is to be distributed to a large number of users. We
        also examine barriers to the adoption of digital fountains and discuss whether
        they can be overcome}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.114.2282}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.114.2282.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
_onthe
@booklet{_onthe,
  title = {On the Strategic Importance of Programmable Middleboxes}, 
  author = {Thomas Fuhrmann}, 
  year = {2003}, 
  abstract = {Network protocols suffer from a lock dictated by the need for standardization
        and Metcalf's law. Programmable middleboxes can help to relieve the effects of
        that lock. This paper gives game theoretic arguments that show how the option of
        having middleboxes can raise the quality of communication protocols. Based on
        this analysis, design considerations for active and programmable networks are
        discussed}, 
  www_section = {programmable networks}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.71.7171}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann03strategy.pdf},
}
abe
@conference{abe,
  title = {Universally Verifiable Mix-net with Verification Work Independent of the Number
        of Mix-servers}, 
  author = {Masayuki Abe}, 
  booktitle = {Proceedings of EUROCRYPT 1998}, 
  organization = {Springer-Verlag, LNCS 1403}, 
  year = {1998}, 
  publisher = {Springer-Verlag, LNCS 1403}, 
  abstract = {In this paper we construct a universally verifiable Mix-net where the amount
        of work done by a verifier is independent of the number of mix-servers.
        Furthermore, the computational task of each mix-server is constant against the
        number of mix-servers except for some negligible tasks like addition. The scheme
        is robust, too}, 
  www_section = {electronic voting, mix, universal verifiability}, 
  isbn = {978-3-540-64518-4}, 
  doi = {10.1007/BFb0054144}, 
  url = {http://www.springerlink.com/content/hl8838u4l9354544/}, 
}
acsac11-backlit
@conference{acsac11-backlit,
  title = {Exposing Invisible Timing-based Traffic Watermarks with BACKLIT}, 
  author = {Xiapu Luo and Peng Zhou and Junjie Zhang and Roberto Perdisci and Wenke Lee and
        Rocky K. C. Chang}, 
  booktitle = {ACSAC'11--Proceedings of 2011 Annual Computer Security Applications
        Conference}, 
  year = {2011}, 
  month = {December}, 
  address = {Orlando, FL, USA}, 
  abstract = {Traffic watermarking is an important element in many network security and
        privacy applications, such as tracing botnet C\&C communications and
        deanonymizing peer-to-peer VoIP calls. The state-of-the-art traffic watermarking
        schemes are usually based on packet timing information and they are notoriously
        difficult to detect. In this paper, we show for the first time that even the most
        sophisticated timing-based watermarking schemes (e.g., RAINBOW and SWIRL) are not
        invisible by proposing a new detection system called BACKLIT. BACKLIT is designed
        according to the observation that any practical timing-based traffic watermark
        will cause noticeable alterations in the intrinsic timing features typical of TCP
        flows. We propose five metrics that are sufficient for detecting four
        state-of-the-art traffic watermarks for bulk transfer and interactive traffic.
        BACKLIT can be easily deployed in stepping stones and anonymity networks (e.g.,
        Tor), because it does not rely on strong assumptions and can be realized in an
        active or passive mode. We have conducted extensive experiments to evaluate
        BACKLIT's detection performance using the PlanetLab platform. The results show
        that BACKLIT can detect watermarked network flows with high accuracy and few
        false positives}, 
  www_section = {BACKLIT, detection system, invisible, network security, packet timing
        information, privacy, traffic watermark}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2711\%20-\%20BACKLIT.pdf},
  url = {https://bibliography.gnunet.org}, 
}
acsac11-tortoise
@conference{acsac11-tortoise,
  title = {Exploring the Potential Benefits of Expanded Rate Limiting in Tor: Slow and
        Steady Wins the Race With Tortoise}, 
  author = {W. Brad Moore and Chris Wacek and Micah Sherr}, 
  booktitle = {ACSAC'11--Proceedings of 2011 Annual Computer Security Applications
        Conference}, 
  year = {2011}, 
  month = {December}, 
  address = {Orlando, FL, USA}, 
  abstract = {Tor is a volunteer-operated network of application-layer relays that enables
        users to communicate privately and anonymously. Unfortunately, Tor often exhibits
        poor performance due to congestion caused by the unbalanced ratio of clients to
        available relays, as well as a disproportionately high consumption of network
        capacity by a small fraction of filesharing users. This paper argues the very
        counterintuitive notion that slowing down traffic on Tor will increase the
        bandwidth capacity of the network and consequently improve the experience of
        interactive web users. We introduce Tortoise, a system for rate limiting Tor at
        its ingress points. We demonstrate that Tortoise incurs little penalty for
        interactive web users, while significantly decreasing the throughput for
        filesharers. Our techniques provide incentives to filesharers to configure their
        Tor clients to also relay traffic, which in turn improves the network's overall
        performance. We present large-scale emulation results that indicate that
        interactive users will achieve a significant speedup if even a small fraction of
        clients opt to run relays}, 
  www_section = {anonymity, performance, Tor}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ACSAC\%2711\%20-\%20Tortoise.pdf},
  url = {https://bibliography.gnunet.org}, 
}
adams06
@article{adams06,
  title = {A Classification for Privacy Techniques}, 
  author = {Carlisle Adams}, 
  journal = {University of Ottawa Law \& Technology Journal}, 
  volume = {3}, 
  year = {2006}, 
  pages = {35--52}, 
  abstract = {This paper proposes a classification for techniques that encourage, preserve,
        or enhance privacy in online environments. This classification encompasses both
        automated mechanisms (those that exclusively or primarily use computers and
        software to implement privacy techniques) and nonautomated mechanisms (those that
        exclusively or primarily use human means to implement privacy techniques). We
        give examples of various techniques and show where they fit within this
        classification. The importance of such a classification is discussed along with
        its use as a tool for the comparison and evaluation of privacy techniques}, 
  www_section = {privacy}, 
  url = {http://papers.ssrn.com/sol3/papers.cfm?abstract_id=999672}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/adams06.pdf}, 
}
adida07
@conference{adida07,
  title = {How to Shuffle in Public}, 
  author = {Ben Adida and Douglas Wikstr{\"o}m}, 
  booktitle = {Proceedings of the Theory of Cryptography 2007}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2007}, 
  month = {February}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {We show how to obfuscate a secret shuffle of ciphertexts: shuffling becomes a
        public operation. Given a trusted party that samples and obfuscates a shuffle
        before any ciphertexts are received, this reduces the problem of constructing a
        mix-net to verifiable joint decryption. We construct public-key obfuscations of a
        decryption shuffle based on the Boneh-Goh-Nissim (BGN) cryptosystem and a
        re-encryption shuffle based on the Paillier cryptosystem. Both allow efficient
        distributed verifiable decryption. Finally, we give a distributed protocol for
        sampling and obfuscating each of the above shuffles and show how it can be used
        in a trivial way to construct a universally composable mix-net. Our constructions
        are practical when the number of senders N is small, yet large enough to handle a
        number of practical cases, e.g. N = 350 in the BGN case and N = 2000 in the
        Paillier case}, 
  www_section = {public key cryptography, re-encryption}, 
  isbn = {978-3-540-70935-0}, 
  doi = {10.1007/978-3-540-70936-7}, 
  url = {http://www.springerlink.com/content/j6p730488x602r28/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/adida07.pdf}, 
}
agrawal03
@conference{agrawal03,
  title = {Probabilistic Treatment of MIXes to Hamper Traffic Analysis}, 
  author = {Dakshi Agrawal and Dogan Kesdogan and Stefan Penz}, 
  booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2003}, 
  month = {May}, 
  pages = {16--27}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {The goal of anonymity providing techniques is to preserve the privacy of
        users, who has communicated with whom, for how long, and from which location, by
        hiding traffic information. This is accomplished by organizing additional traffic
        to conceal particular communication relationships and by embedding the sender and
        receiver of a message in their respective anonymity sets. If the number of
        overall participants is greater than the size of the anonymity set and if the
        anonymity set changes with time due to unsynchronized participants, then the
        anonymity technique becomes prone to traffic analysis attacks. In this paper, we
        are interested in the statistical properties of the disclosure attack, a newly
        suggested traffic analysis attack on the MIXes. Our goal is to provide analytical
        estimates of the number of observations required by the disclosure attack and to
        identify fundamental (but avoidable) {\textquoteleft}weak operational modes' of
        the MIXes and thus to protect users against a traffic analysis by the disclosure
        attack}, 
  www_section = {anonymity measurement, mix, traffic analysis}, 
  isbn = {0-7695-1940-7}, 
  url = {http://portal.acm.org/citation.cfm?id=829515.830557}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/agrawal03.pdf}, 
}
albrecht2006planetlab
@article{albrecht2006planetlab,
  title = {PlanetLab application management using Plush}, 
  author = {Albrecht, J. and Tuttle, C. and Snoeren, A.C. and Vahdat, A.}, 
  journal = {ACM SIGOPS Operating Systems Review}, 
  volume = {40}, 
  number = {1}, 
  year = {2006}, 
  pages = {33--40}, 
  publisher = {ACM}, 
  www_section = {application management, PlanetLab, plush, resource allocation, resource
        discovery}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/plush.pdf}, 
}
alpha-mixing:pet2006
@conference{alpha-mixing:pet2006,
  title = {Blending Different Latency Traffic with Alpha-Mixing}, 
  author = {Roger Dingledine and Andrei Serjantov and Paul Syverson}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = {June}, 
  address = {Cambridge, UK}, 
  pages = {245--257}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {Currently fielded anonymous communication systems either introduce too much
        delay and thus have few users and little security, or have many users but too
        little delay to provide protection against large attackers. By combining the user
        bases into the same network, and ensuring that all traffic is mixed together, we
        hope to lower delay and improve anonymity for both sets of users. Alpha-mixing is
        an approach that can be added to traditional batching strategies to let senders
        specify for each message whether they prefer security or speed. Here we describe
        how to add alpha-mixing to various mix designs, and show that mix networks with
        this feature can provide increased anonymity for all senders in the network.
        Along the way we encounter subtle issues to do with the attacker's knowledge of
        the security parameters of the users}, 
  www_section = {anonymity}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/m23510526727k317/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/alpha-mixing-pet2006.pdf},
}
andrei-thesis
@phdthesis{andrei-thesis,
  title = {On the Anonymity of Anonymity Systems}, 
  author = {Andrei Serjantov}, 
  school = {University of Cambridge}, 
  year = {2004}, 
  month = {June}, 
  type = {{PhD} thesis}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/andrei-thesis.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
androulaki-pet2008
@conference{androulaki-pet2008,
  title = {Reputation Systems for Anonymous Networks}, 
  author = {Elli Androulaki and Seung Geol Choi and Steven M. Bellovin and Tal Malkin}, 
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)}, 
  organization = {Springer}, 
  year = {2008}, 
  month = {July}, 
  address = {Leuven, Belgium}, 
  pages = {202--218}, 
  editor = {Borisov, Nikita and Ian Goldberg}, 
  publisher = {Springer}, 
  abstract = {We present a reputation scheme for a pseudonymous peer-to-peer (P2P) system
        in an anonymous network. Misbehavior is one of the biggest problems in
        pseudonymous P2P systems, where there is little incentive for proper behavior. In
        our scheme, using ecash for reputation points, the reputation of each user is
        closely related to his real identity rather than to his current pseudonym. Thus,
        our scheme allows an honest user to switch to a new pseudonym keeping his good
        reputation, while hindering a malicious user from erasing his trail of evil deeds
        with a new pseudonym}, 
  www_section = {anonymity, P2P, pseudonym}, 
  isbn = {978-3-540-70629-8}, 
  doi = {10.1007/978-3-540-70630-4_13}, 
  url = {http://portal.acm.org/citation.cfm?id=1428259.1428272}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/getTechreport.pdf}, 
}
anonymity_and_cover_traffic2014
@conference{anonymity_and_cover_traffic2014,
  title = {Do Dummies Pay Off? Limits of Dummy Traffic Protection in Anonymous
        Communications}, 
  author = {Oya, Simon and Troncoso, Carmela and P{\'e}rez-Gonz{\'a}lez, Fernando}, 
  booktitle = {Privacy Enhancing Technologies}, 
  organization = {Springer International Publishing}, 
  volume = {8555}, 
  year = {2014}, 
  pages = {204--223}, 
  editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}, 
  publisher = {Springer International Publishing}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Anonymous communication systems ensure that correspondence between senders
        and receivers cannot be inferred with certainty. However, when patterns are
        persistent, observations from anonymous communication systems enable the
        reconstruction of user behavioral profiles. Protection against profiling can be
        enhanced by adding dummy messages, generated by users or by the anonymity
        provider, to the communication. In this paper we study the limits of the
        protection provided by this countermeasure. We propose an analysis methodology
        based on solving a least squares problem that permits to characterize the
        adversary's profiling error with respect to the user behavior, the anonymity
        provider behavior, and the dummy strategy. Focusing on the particular case of a
        timed pool mix we show how, given a privacy target, the performance analysis can
        be used to design optimal dummy strategies to protect this objective}, 
  www_section = {Unsorted}, 
  isbn = {978-3-319-08505-0}, 
  doi = {10.1007/978-3-319-08506-7_11}, 
  url = {http://dx.doi.org/10.1007/978-3-319-08506-7_11}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/anonymity_and_cover_traffic.pdf},
}
arias2014bs
@mastersthesis{arias2014bs,
  title = {Numerical Stability and Scalability of Secure Private Linear Programming}, 
  author = {Raphael Arias}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {B. Sc}, 
  year = {2014}, 
  month = {February}, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--65}, 
  type = {Bachelor's}, 
  abstract = {Linear programming (LP) has numerous applications in different fields. In
        some scenarios, e.g. supply chain master planning (SCMP), the goal is solving
        linear programs involving multiple parties reluctant to sharing their private
        information. In this case, methods from the area of secure multi-party
        computation (SMC) can be used. Secure multi-party versions of LP solvers have
        been known to be impractical due to high communication complexity. To overcome
        this, solutions based on problem transformation have been put forward. In this
        thesis, one such algorithm, proposed by Dreier and Kerschbaum, is discussed,
        implemented, and evaluated with respect to numerical stability and scalability.
        Results obtained with different parameter sets and different test cases are
        presented and some problems are exposed. It was found that the algorithm has some
        unforeseen limitations, particularly when implemented within the bounds of normal
        primitive data types. Random numbers generated during the protocol have to be
        extremely small so as to not cause problems with overflows after a series of
        multiplications. The number of peers participating additionally limits the size
        of numbers. A positive finding was that results produced when none of the
        aforementioned problems occur are generally quite accurate. We discuss a few
        possibilities to overcome some of the problems with an implementation using
        arbitrary precision numbers}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/arias2014bs.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
back-hash
@booklet{back-hash,
  title = {Hashcash--a denial of service counter-measure}, 
  author = {Adam Back}, 
  year = {2002}, 
  abstract = {Hashcash was originally proposed as a mechanism to throttle systematic abuse
        of un-metered internet resources such as email, and anonymous remailers in May
        1997. Five years on, this paper captures in one place the various applications,
        improvements suggested and related subsequent publications, and describes initial
        experience from experiments using hashcash. The hashcash CPU cost-function
        computes a token which can be used as a proof-of-work. Interactive and
        non-interactive variants of cost-functions can be constructed which can be used
        in situations where the server can issue a challenge (connection oriented
        interactive protocol), and where it can not (where the communication is
        store--and--forward, or packet oriented) respectively}, 
  url = {http://citeseer.ist.psu.edu/back02hashcash.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hashcash.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
back01
@conference{back01,
  title = {Traffic Analysis Attacks and Trade-Offs in Anonymity Providing Systems}, 
  author = {Adam Back and Ulf M{\"o}ller and Anton Stiglic}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2001)}, 
  organization = {Springer-Verlag, LNCS 2137}, 
  year = {2001}, 
  month = {April}, 
  pages = {245--257}, 
  editor = {Ira S. Moskowitz}, 
  publisher = {Springer-Verlag, LNCS 2137}, 
  abstract = {We discuss problems and trade-offs with systems providing anonymity for web
        browsing (or more generally any communication system that requires low latency
        interaction). We focus on two main systems: the Freedom network [12] and PipeNet
        [8]. Although Freedom is efficient and reasonably secure against denial of
        service attacks, it is vulnerable to some generic traffic analysis attacks, which
        we describe. On the other hand, we look at PipeNet, a simple theoretical model
        which protects against the traffic analysis attacks we point out, but is
        vulnerable to denial of services attacks and has efficiency problems. In light of
        these observations, we discuss the trade-offs that one faces when trying to
        construct an efficient low latency communication system that protects users
        anonymity}, 
  www_section = {anonymity, Freedom, latency, Pipenet}, 
  isbn = {978-3-540-42733-9}, 
  doi = {10.1007/3-540-45496-9}, 
  url = {http://www.springerlink.com/content/4gpwtejkkvadcdcm/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/traffic_0.pdf}, 
}
bartsthesis
@mastersthesis{bartsthesis,
  title = {Adapting Blackhat Approaches to Increase the Resilience of Whitehat Application
        Scenarios}, 
  author = {Polot, Bartlomiej}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {M.S}, 
  year = {2010}, 
  address = {M{\"u}nchen}, 
  type = {masters}, 
  www_section = {Botnet, distributed hash table, GNUnet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Polot2010.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
bauer:alpaca2008
@conference{bauer:alpaca2008,
  title = {BitBlender: Light-Weight Anonymity for BitTorrent}, 
  author = {Kevin Bauer and Damon McCoy and Dirk Grunwald and Douglas Sicker}, 
  booktitle = {Proceedings of the Workshop on Applications of Private and Anonymous
        Communications (AlPACa 2008)}, 
  organization = {ACM}, 
  year = {2008}, 
  month = {September}, 
  address = {Istanbul, Turkey}, 
  publisher = {ACM}, 
  abstract = {We present BitBlender, an efficient protocol that provides an anonymity layer
        for BitTorrent traffic. BitBlender works by creating an ad-hoc multi-hop network
        consisting of special peers called "relay peers" that proxy requests and replies
        on behalf of other peers. To understand the effect of introducing relay peers
        into the BitTorrent system architecture, we provide an analysis of the expected
        path lengths as the ratio of relay peers to normal peers varies. A prototype is
        implemented and experiments are conducted on Planetlab to quantify the
        performance overhead associated with the protocol. We also propose protocol
        extensions to add confidentiality and access control mechanisms, countermeasures
        against traffic analysis attacks, and selective caching policies that
        simultaneously increase both anonymity and performance. We finally discuss the
        potential legal obstacles to deploying an anonymous file sharing protocol. This
        work is among the first to propose a privacy enhancing system that is designed
        specifically for a particular class of peer-to-peer traffic}, 
  www_section = {ad-hoc networks, anonymity, P2P, privacy}, 
  doi = {10.1145/1461464.1461465}, 
  url = {http://portal.acm.org/citation.cfm?id=1461465}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bauer-alpaca2008.pdf}, 
}
bauer:wpes2003
@conference{bauer:wpes2003,
  title = {New Covert Channels in HTTP: Adding Unwitting Web Browsers to Anonymity Sets}, 
  author = {Matthias Bauer}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2003)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2003}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {This paper presents new methods enabling anonymous communication on the
        Internet. We describe a new protocol that allows us to create an anonymous
        overlay network by exploiting the web browsing activities of regular users. We
        show that the overlay net work provides an anonymity set greater than the set of
        senders and receivers in a realistic threat model. In particular, the protocol
        provides unobservability in our threat model}, 
  www_section = {anonymity, covert channel, HTTP}, 
  isbn = {1-58113-776-1}, 
  doi = {10.1145/1005140.1005152}, 
  url = {http://portal.acm.org/citation.cfm?id=1005152}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.4.6246.pdf}, 
}
bauer:wpes2007
@conference{bauer:wpes2007,
  title = {Low-Resource Routing Attacks Against Tor}, 
  author = {Kevin Bauer and Damon McCoy and Dirk Grunwald and Tadayoshi Kohno and Douglas
        Sicker}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2007)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2007}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Tor has become one of the most popular overlay networks for anonymizing TCP
        traffic. Its popularity is due in part to its perceived strong anonymity
        properties and its relatively low latency service. Low latency is achieved
        through Tor's ability to balance the traffic load by optimizing Tor router
        selection to probabilistically favor routers with highbandwidth capabilities. We
        investigate how Tor's routing optimizations impact its ability to provide
        strong anonymity. Through experiments conducted on PlanetLab, we show the extent
        to which routing performance optimizations have left the system vulnerable to
        end-to-end traffic analysis attacks from non-global adversaries with minimal
        resources. Further, we demonstrate that entry guards, added to mitigate path
        disruption attacks, are themselves vulnerable to attack. Finally, we explore
        solutions to improve Tor's current routing algorithms and propose
        alternative routing strategies that prevent some of the routing attacks used in
        our experiments}, 
  www_section = {anonymity, load balancing, Tor, traffic analysis}, 
  isbn = {978-1-59593-883-1}, 
  doi = {10.1145/1314333.1314336}, 
  url = {http://portal.acm.org/citation.cfm?id=1314336}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bauer-wpes2007.pdf}, 
}
beimel-barrier
@conference{beimel-barrier,
  title = {Breaking the $O(n^{1/(2k-1)})$ Barrier for Information-Theoretic Private
        Information Retrieval}, 
  author = {Amos Beimel and Yuval Ishai and Eyal Kushilevitz and Jean-Fran{\c c}ois
        Raymond}, 
  booktitle = {Proceedings of the 43rd IEEE Symposium on Foundations of Computer Science
        (FOCS)}, 
  year = {2002}, 
  abstract = {Private Information Retrieval (PIR) protocols allow a user to retrieve a data
        item from a database while hiding the identity of the item being retrieved.
        Specifically, in information-theoretic, k-server PIR protocols the database is
        replicated among k servers, and each server learns nothing about the item the
        user retrieves. The cost of such protocols is measured by the communication
        complexity of retrieving one out of n bits of data. For any fixed k, the
        complexity of the best protocols prior to our work was O(n^{\frac{1}{2k-1}})
        (Ambainis, 1997). Since then several methods were developed in an attempt to beat
        this bound, but all these methods yielded the same asymptotic bound. In this work,
        this barrier is finally broken and the complexity of information-theoretic
        k-server PIR is improved to n^{O(\frac{\log \log k}{k\log k})}. The new PIR
        protocols can also be used to construct k-query binary locally decodable codes of
        length exp(n^{O(\frac{\log \log k}{k\log k})}), compared to
        exp(n^{\frac{1}{k-1}}) in previous constructions. The improvements presented
        in this paper apply even for small values of k: the PIR protocols are more
        efficient than previous ones for every k \geqslant 3, and the locally decodable
        codes are shorter for every k \geqslant 4}, 
  www_section = {private information retrieval}, 
  isbn = {0-7695-1822-2}, 
  url = {http://portal.acm.org/citation.cfm?id=652187}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/beimel-barrier.pdf}, 
}
beimel-robust
@conference{beimel-robust,
  title = {Robust information-theoretic private information retrieval}, 
  author = {Amos Beimel and Yoav Stahl}, 
  booktitle = {Proceedings of the 3rd Conference on Security in Communication Networks}, 
  organization = {Springer-Verlag}, 
  volume = {2576}, 
  year = {2002}, 
  pages = {326--341}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {A Private Information Retrieval (PIR) protocol allows a user to retrieve a
        data item of its choice from a database, such that the servers storing the
        database do not gain information on the identity of the item being retrieved. PIR
        protocols were studied in depth since the subject was introduced in Chor,
        Goldreich, Kushilevitz, and Sudan 1995. The standard definition of PIR protocols
        raises a simple question--what happens if some of the servers crash during the
        operation? How can we devise a protocol which still works in the presence of
        crashing servers? Current systems do not guarantee availability of servers at all
        times for many reasons, e.g., crash of server or communication problems. Our
        purpose is to design robust PIR protocols, i.e., protocols which still work
        correctly even if only k out of l servers are available during the protocols'
        operation (the user does not know in advance which servers are available). We
        present various robust PIR protocols giving different tradeoffs between the
        different parameters. These protocols are incomparable, i.e., for different
        values of n and k we will get better results using different protocols. We first
        present a generic transformation from regular PIR protocols to robust PIR
        protocols, this transformation is important since any improvement in the
        communication complexity of regular PIR protocol will immediately implicate
        improvement in the robust PIR protocol communication. We also present two
        specific robust PIR protocols. Finally, we present robust PIR protocols which can
        tolerate Byzantine servers, i.e., robust PIR protocols which still work in the
        presence of malicious servers or servers with corrupted or obsolete databases}, 
  www_section = {obsolete database, private information retrieval, robustness}, 
  isbn = {978-3-540-00420-2}, 
  doi = {10.1007/3-540-36413-7}, 
  url = {http://www.springerlink.com/content/9bnlbf2e2lp9u9p4/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/BS.pdf}, 
}
beimel01informationtheoretic
@article{beimel01informationtheoretic,
  title = {Information-Theoretic Private Information Retrieval: A Unified Construction}, 
  author = {Amos Beimel and Yuval Ishai}, 
  journal = {Lecture Notes in Computer Science}, 
  volume = {2076}, 
  year = {2001}, 
  pages = {89--98}, 
  abstract = {A Private Information Retrieval (PIR) protocol enables a user to retrieve a
        data item from a database while hiding the identity of the item being retrieved.
        In a t-private, k-server PIR protocol the database is replicated among k servers,
        and the user's privacy is protected from any collusion of up to t servers. The
        main cost-measure of such protocols is the communication complexity of retrieving
        a single bit of data. This work addresses the information-theoretic setting for
        PIR, in which the user's privacy should be unconditionally protected from
        collusions of servers. We present a unified general construction, whose abstract
        components can be instantiated to yield both old and new families of PIR
        protocols. A main ingredient in the new protocols is a generalization of a
        solution by Babai, Kimmel, and Lokam to a communication complexity problem in the
        so-called simultaneous messages model. Our construction strictly improves upon
        previous constructions and resolves some previous anomalies. In particular, we
        obtain: (1) t-private k-server PIR protocols with O(n^{1/\lfloor
        (2k-1)/t\rfloor}) communication bits, where n is the database size. For t > 1,
        this is a substantial asymptotic improvement over the previous state of the art;
        (2) a constant-factor improvement in the communication complexity of 1-private
        PIR, providing the first improvement to the 2-server case since PIR protocols
        were introduced; (3) efficient PIR protocols with logarithmic query length. The
        latter protocols have applications to the construction of efficient families of
        locally decodable codes over large alphabets and to PIR protocols with reduced
        work by the servers}, 
  www_section = {communication complexity, privacy, private information retrieval}, 
  isbn = {978-3-540-42287-7}, 
  issn = {0302-9743}, 
  doi = {10.1007/3-540-48224-5}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.2796}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/beimel01informationtheoretic.pdf},
}
berman-fc2004
@conference{berman-fc2004,
  title = {Provable Unlinkability Against Traffic Analysis}, 
  author = {Ron Berman and Amos Fiat and Amnon Ta-Shma}, 
  booktitle = {Proceedings of Financial Cryptography (FC '04)}, 
  organization = {Springer-Verlag, LNCS 3110}, 
  year = {2004}, 
  month = {February}, 
  pages = {266--280}, 
  editor = {Ari Juels}, 
  publisher = {Springer-Verlag, LNCS 3110}, 
  abstract = {We consider unlinkability of communication problem: given n users, each
        sending a message to some destination, encode and route the messages so that an
        adversary analyzing the traffic in the communication network cannot link the
        senders with the recipients. A solution should have a small communication
        overhead, that is, the number of additional messages should be kept low. David
        Chaum introduced idea of mixes for solving this problem. His approach was
        developed further by Simon and Rackoff, and implemented later as the onion
        protocol. Even if the onion protocol is widely regarded as secure and used in
        practice, formal arguments supporting this claim are rare and far from being
        complete. On top of that, in certain scenarios very simple tricks suffice to
        break security without breaking the cryptographic primitives. It turns out that
        one source of difficulties in analyzing the onion protocols security is the
        adversary model. In a recent work, Berman, Fiat and Ta-Shma develop a new and
        more realistic model in which only a constant fraction of communication lines can
        be accessed by an adversary, the number of messages does not need to be high and
        the preferences of the users are taken into account. For this model they prove
        that with high probability a good level of unlinkability is obtained after steps
        of the onion protocol where n is the number of messages sent. In this paper we
        improve these results: we show that the same level of unlinkability (expressed as
        variation distance between certain probability distributions) is obtained with
        high probability already after steps of the onion protocol. Asymptotically, this
        is the best result possible, since obviously (log n) steps are necessary. On top
        of that, our analysis is much simpler. It is based on path coupling technique
        designed for showing rapid mixing of Markov chains}, 
  www_section = {anonymity, Markov chain, path coupling, rapid mixing, unlinkability}, 
  isbn = {978-3-540-23208-7}, 
  doi = {10.1007/b100936}, 
  url = {http://www.springerlink.com/content/cknab9y9bpete2ha/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/berman-fc2004.pdf}, 
}
bnymble11
@conference{bnymble11,
  title = {BNymble: More anonymous blacklisting at almost no cost}, 
  author = {Peter Lofgren and Nicholas J. Hopper}, 
  booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security}, 
  year = {2011}, 
  month = {February}, 
  address = {St. Lucia}, 
  abstract = {Anonymous blacklisting schemes allow online service providers to prevent
        future anonymous access by abusive users while preserving the privacy of all
        anonymous users (both abusive and non-abusive). The first scheme proposed for
        this purpose was Nymble, an extremely efficient scheme based only on symmetric
        primitives; however, Nymble relies on trusted third parties who can collude to
        de-anonymize users of the scheme. Two recently proposed schemes, Nymbler and
        Jack, reduce the trust placed in these third parties at the expense of using
        less-efficient asymmetric crypto primitives. We present BNymble, a scheme which
        matches the anonymity guarantees of Nymbler and Jack while (nearly) maintaining
        the efficiency of the original Nymble. The key insight of BNymble is that we can
        achieve the anonymity goals of these more recent schemes by replacing only the
        infrequent {\textquotedblleft}User Registration{\textquotedblright} protocol from
        Nymble with asymmetric primitives. We prove the security of BNymble, and report
        on its efficiency}, 
  www_section = {anonymous access, anonymous blacklisting, BNymble}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20BNymble.pdf},
  url = {https://bibliography.gnunet.org}, 
}
boneh04publickey
@conference{boneh04publickey,
  title = {Public-key encryption with keyword search}, 
  author = {Dan Boneh and Giovanni Di Crescenzo and Rafail Ostrovsky and Giuseppe
        Persiano}, 
  booktitle = {Eurocrypt 2004}, 
  organization = {Springer-Verlag}, 
  year = {2004}, 
  month = {January}, 
  publisher = {Springer-Verlag}, 
  abstract = {We study the problem of searching on data that is encrypted using a public
        key system. Consider user Bob who sends email to user Alice encrypted under
        Alice's public key. An email gateway wants to test whether the email contains the
        keyword "urgent" so that it could route the email accordingly. Alice, on the
        other hand does not wish to give the gateway the ability to decrypt all her
        messages. We define and construct a mechanism that enables Alice to provide a key
        to the gateway that}, 
  url = {citeseer.ist.psu.edu/boneh04public.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/encsearch.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
bootstrap2008gauthierdickey
@conference{bootstrap2008gauthierdickey,
  title = {Bootstrapping of Peer-to-Peer Networks}, 
  author = {Chris GauthierDickey and Christian Grothoff}, 
  booktitle = {Proceedings of DAS-P2P}, 
  organization = {IEEE}, 
  year = {2008}, 
  month = {August}, 
  address = {Turku, Finland}, 
  publisher = {IEEE}, 
  abstract = {In this paper, we present the first heuristic for fully distributed
        bootstrapping of peer-to-peer networks. Our heuristic generates a stream of
        promising IP addresses to be probed as entry points. This stream is generated
        using statistical profiles using the IP ranges of start-of-authorities (SOAs) in
        the domain name system (DNS). We present experimental results demonstrating that
        with this approach it is efficient and practical to bootstrap Gnutella-sized
        peer-to-peer networks --- without the need for centralized services or the public
        exposure of end-user's private IP addresses}, 
  www_section = {bootstrapping, DNS, installation, P2P}, 
  url = {http://grothoff.org/christian/bootstrap.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/bootstrap.pdf}, 
}
brands06
@article{brands06,
  title = {Secure User Identification Without Privacy Erosion}, 
  author = {Stefan Brands}, 
  journal = {University of Ottawa Law \& Technology Journal}, 
  volume = {3}, 
  year = {2006}, 
  pages = {205--223}, 
  abstract = {Individuals are increasingly confronted with requests to identify themselves
        when accessing services provided by government organizations, companies, and
        other service providers. At the same time, traditional transaction mechanisms are
        increasingly being replaced by electronic mechanisms that underneath their hood
        automatically capture and record globally unique identifiers. Taken together,
        these interrelated trends are currently eroding the privacy and security of
        individuals in a manner unimaginable just a few decades ago. Privacy activists
        are facing an increasingly hopeless battle against new privacy-invasive
        identification initiatives: the cost of computerized identification systems is
        rapidly going down, their accuracy and efficiency is improving all the time, much
        of the required data communication infrastructure is now in place, forgery of
        non-electronic user credentials is getting easier all the time, and data sharing
        imperatives have gone up dramatically. This paper argues that the privacy vs.
        identification debate should be moved into less polarized territory. Contrary to
        popular misbelief, identification and privacy are not opposite interests that
        need to be balanced: the same technological advances that threaten to annihilate
        privacy can be exploited to save privacy in an electronic age. The aim of this
        paper is to clarify that premise on the basis of a careful analysis of the
        concept of user identification itself. Following an examination of user
        identifiers and its purposes, I classify identification technologies in a manner
        that enables their privacy and security implications to be clearly articulated
        and contrasted. I also include an overview of a modern privacy-preserving
        approach to user identification}, 
  www_section = {authentication, cryptography, data sharing, privacy}, 
  url = {http://papers.ssrn.com/sol3/papers.cfm?abstract_id=999695}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/brands06.pdf}, 
}
busca:pastis:
@conference{busca:pastis:,
  title = {Pastis: A Highly-Scalable Multi-user Peer-to-Peer File System}, 
  author = {Jean-Michel Busca and Fabio Picconi and Pierre Sens}, 
  booktitle = {Euro-Par'05 Parallel Processing}, 
  organization = {Springer-Verlag}, 
  year = {2005}, 
  month = {September}, 
  address = {Lisboa, Portugal}, 
  pages = {1173--1182}, 
  publisher = {Springer-Verlag}, 
  abstract = {We introduce Pastis, a completely decentralized multi-user read-write
        peer-to-peer file system. In Pastis every file is described by a modifiable
        inode-like structure which contains the addresses of the immutable blocks in
        which the file contents are stored. All data are stored using the Past
        distributed hash table (DHT), which we have modified in order to reduce the
        number of network messages it generates, thus optimizing replica retrieval.
        Pastis' design is simple compared to other existing systems, as it does not
        require complex algorithms like Byzantine-fault tolerant (BFT) replication or a
        central administrative authority. It is also highly scalable in terms of the
        number of network nodes and users sharing a given file or portion of the file
        system. Furthermore, Pastis takes advantage of the fault tolerance and good
        locality properties of its underlying storage layer, the Past DHT. We have
        developed a prototype based on the FreePastry open-source implementation of the
        Past DHT. We have used this prototype to evaluate several characteristics of our
        file system design. Supporting the close-to-open consistency model, plus a
        variant of the read-your-writes model, our prototype shows that Pastis is between
        1.4 to 1.8 times slower than NFS. In comparison, Ivy and Oceanstore are between
        two to three times slower than NFS}, 
  www_section = {distributed hash table, multi-user, Pastis, peer-to-peer file system,
        read-write}, 
  doi = {10.1007/11549468_128}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Euro-Par\%2705\%20-\%20Pastis.pdf},
}
buses03
@article{buses03,
  title = {Buses for Anonymous Message Delivery}, 
  author = {Amos Beimel and Shlomi Dolev}, 
  journal = {Journal of Cryptology}, 
  volume = {16}, 
  number = {1}, 
  year = {2003}, 
  pages = {25--39}, 
  abstract = {This work develops a novel approach to hide the senders and the receivers of
        messages. The intuition is taken from an everyday activity that hides the
        {\textquoteleft}{\textquoteleft}communication pattern''{\textemdash}the public
        transportation system. To describe our protocols, buses are used as a metaphor:
        Buses, i.e., messages, are traveling on the network, each piece of information is
        allocated a seat within the bus. Routes are chosen and buses are scheduled to
        traverse these routes. Deterministic and randomized protocols are presented, the
        protocols differ in the number of buses in the system, the worst case traveling
        time, and the required buffer size in a
        {\textquoteleft}{\textquoteleft}station.'' In particular, a protocol that is
        based on cluster partition of the network is presented; in this protocol there is
        one bus traversing each cluster. The clusters' size in the partition gives time
        and communication tradeoffs. One advantage of our protocols over previous works
        is that they are not based on statistical properties for the communication
        pattern. Another advantage is that they only require the processors in the
        communication network to be busy periodically}, 
  www_section = {privacy, traffic analysis}, 
  issn = {0933-2790}, 
  doi = {10.1007/s00145-002-0128-6}, 
  url = {http://www.springerlink.com/content/eljjgl3ec01c00xa/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.80.1566.pdf}, 
}
c.rhea:probabilistic
@inproceedings{c.rhea:probabilistic,
  title = {Probabilistic Location and Routing},
  author = {Rhea, Sean C. and John Kubiatowicz},
  booktitle = {INFOCOM'02. Proceedings of the 21st Annual Joint Conference of the IEEE
        Computer and Communications Societies},
  organization = {IEEE Computer Society},
  year = {2002},
  month = jun,
  address = {New York, NY, USA},
  publisher = {IEEE Computer Society},
  abstract = {We propose probabilistic location to enhance the performance of existing
        peer-to-peer location mechanisms in the case where a replica for the queried data
        item exists close to the query source. We introduce the attenuated Bloom filter,
        a lossy distributed index data structure. We describe how to use these data
        structures for document location and how to maintain them despite document
        motion. We include a detailed performance study which indicates that our
        algorithm performs as desired, both finding closer replicas and finding them
        faster than deterministic algorithms alone},
  www_section = {Bloom filter, document location, document motion, probabilistic location},
  isbn = {0-7803-7476-2},
  doi = {10.1109/INFCOM.2002.1019375},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2702\%20-\%20Probabilistic\%20location\%20and\%20routing.pdf},
}
camenisch2002da
@inproceedings{camenisch2002da,
  title = {Dynamic Accumulators and Application to Efficient Revocation of Anonymous
        Credentials},
  author = {Jan Camenisch and Anna Lysyanskaya},
  booktitle = {Proceedings of CRYPTO 2002},
  organization = {Springer Verlag, LNCS 2442},
  year = {2002},
  pages = {61--76},
  publisher = {Springer Verlag, LNCS 2442},
  abstract = {We introduce the notion of a dynamic accumulator. An accumulator scheme
        allows one to hash a large set of inputs into one short value, such that there is
        a short proof that a given input was incorporated into this value. A dynamic
        accumulator allows one to dynamically add and delete a value, such that the cost
        of an add or delete is independent of the number of accumulated values. We
        provide a construction of a dynamic accumulator and an efficient zero-knowledge
        proof of knowledge of an accumulated value. We prove their security under the
        strong RSA assumption. We then show that our construction of dynamic accumulators
        enables efficient revocation of anonymous credentials, and membership revocation
        for recent group signature and identity escrow schemes},
  www_section = {anonymity, certificate revocation, credentials, dynamic accumulators,
        group signatures, identity escrow},
  isbn = {978-3-540-44050-5},
  doi = {10.1007/3-540-45708-9},
  url = {http://portal.acm.org/citation.cfm?id=704437},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2002da.pdf},
}
camenisch2002ssep
@inproceedings{camenisch2002ssep,
  title = {A Signature Scheme with Efficient Protocols},
  author = {Jan Camenisch and Anna Lysyanskaya},
  booktitle = {Proceedings of SCN '02, Third Conference on Security in Communication
        Networks},
  organization = {Springer Verlag, LNCS 2576},
  year = {2002},
  pages = {268--289},
  publisher = {Springer Verlag, LNCS 2576},
  abstract = {Digital signature schemes are a fundamental cryptographic primitive, of use
        both in its own right, and as a building block in cryptographic protocol design.
        In this paper, we propose a practical and provably secure signature scheme and
        show protocols (1) for issuing a signature on a committed value (so the signer
        has no information about the signed value), and (2) for proving knowledge of a
        signature on a committed value. This signature scheme and corresponding protocols
        are a building block for the design of anonymity-enhancing cryptographic systems,
        such as electronic cash, group signatures, and anonymous credential systems. The
        security of our signature scheme and protocols relies on the Strong RSA
        assumption. These results are a generalization of the anonymous credential system
        of Camenisch and Lysyanskaya},
  www_section = {anonymity, anonymous credential system, digital signature},
  isbn = {978-3-540-00420-2},
  doi = {10.1007/3-540-36413-7},
  url = {http://www.springerlink.com/content/r66ywt172y06g5qr/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2002ssep.pdf},
}
camenisch2003pve
@inproceedings{camenisch2003pve,
  title = {Practical Verifiable Encryption and Decryption of Discrete Logarithms},
  author = {Jan Camenisch and Victor Shoup},
  booktitle = {Proceedings of CRYPTO 2003},
  organization = {Springer Verlag, LNCS 2729},
  year = {2003},
  pages = {126--144},
  publisher = {Springer Verlag, LNCS 2729},
  abstract = {This paper addresses the problem of designing practical protocols for proving
        properties about encrypted data. To this end, it presents a variant of the new
        public key encryption of Cramer and Shoup based on Pailliers decision composite
        residuosity assumption, along with efficient protocols for verifiable encryption
        and decryption of discrete logarithms (and more generally, of representations
        with respect to multiple bases). This is the first verifiable encryption system
        that provides chosen ciphertext security and avoids inefficient cut-and-choose
        proofs. The presented protocols have numerous applications, including key escrow,
        optimistic fair exchange, publicly verifiable secret and signature sharing,
        universally composable commitments, group signatures, and confirmer signatures},
  www_section = {public key cryptography},
  isbn = {978-3-540-40674-7},
  doi = {10.1007/b11817},
  url = {http://www.springerlink.com/content/wjbh5579hdfd66ed/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camenisch2003pve.pdf},
}
camlys05
@inproceedings{camlys05,
  title = {A Formal Treatment of Onion Routing},
  author = {Jan Camenisch and Anna Lysyanskaya},
  booktitle = {Proceedings of CRYPTO 2005},
  organization = {Springer-Verlag, LNCS 3621},
  year = {2005},
  month = aug,
  pages = {169--187},
  editor = {Victor Shoup},
  publisher = {Springer-Verlag, LNCS 3621},
  abstract = {Anonymous channels are necessary for a multitude of privacy-protecting
        protocols. Onion routing is probably the best known way to achieve anonymity in
        practice. However, the cryptographic aspects of onion routing have not been
        sufficiently explored: no satisfactory definitions of security have been given,
        and existing constructions have only had ad-hoc security analysis for the most
        part. We provide a formal definition of onion-routing in the universally
        composable framework, and also discover a simpler definition (similar to CCA2
        security for encryption) that implies security in the UC framework. We then
        exhibit an efficient and easy to implement construction of an onion routing
        scheme satisfying this definition},
  www_section = {onion routing, privacy},
  isbn = {978-3-540-28114-6},
  doi = {10.1007/11535218},
  url = {http://www.springerlink.com/content/0jmg1krt9ph147ql/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/camlys05.pdf},
}
casc-rep
@inproceedings{casc-rep,
  title = {Reliable MIX Cascade Networks through Reputation},
  author = {Roger Dingledine and Paul Syverson},
  booktitle = {Proceedings of Financial Cryptography (FC '02)},
  organization = {Springer-Verlag, LNCS 2357},
  year = {2002},
  month = mar,
  editor = {Matt Blaze},
  publisher = {Springer-Verlag, LNCS 2357},
  abstract = {We describe a MIX cascade protocol and a reputation system that together
        increase the reliability of a network of MIX cascades. In our protocol, MIX nodes
        periodically generate a communally random seed that, along with their
        reputations, determines cascade configuration. Nodes send test messages to
        monitor their cascades. Senders can also demonstrate message decryptions to
        convince honest cascade members that a cascade is misbehaving. By allowing any
        node to declare the failure of its own cascade, we eliminate the need for global
        trusted witnesses},
  www_section = {anonymity, communal randomness, P2P, reputation},
  isbn = {978-3-540-00646-6},
  doi = {10.1007/3-540-36504-4},
  url = {http://www.springerlink.com/content/g67u25lm80234qj4/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/casc-rep.pdf},
}
ccs07-blac
@inproceedings{ccs07-blac,
  title = {Blacklistable Anonymous Credentials: Blocking Misbehaving Users without TTPs},
  author = {Patrick P. Tsang and Man Ho Au and Apu Kapadia and Sean Smith},
  booktitle = {Proceedings of CCS 2007},
  organization = {ACM New York, NY, USA},
  year = {2007},
  month = oct,
  publisher = {ACM New York, NY, USA},
  abstract = {Several credential systems have been proposed in which users can authenticate
        to services anonymously. Since anonymity can give users the license to misbehave,
        some variants allow the selective deanonymization (or linking) of misbehaving
        users upon a complaint to a trusted third party (TTP). The ability of the TTP to
        revoke a user's privacy at any time, however, is too strong a punishment for
        misbehavior. To limit the scope of deanonymization, systems such as "e-cash" have
        been proposed in which users are deanonymized under only certain types of
        well-defined misbehavior such as "double spending." While useful in some
        applications, it is not possible to generalize such techniques to more subjective
        definitions of misbehavior. We present the first anonymous credential system in
        which services can "blacklist" misbehaving users without contacting a TTP. Since
        blacklisted users remain anonymous, misbehaviors can be judged subjectively
        without users fearing arbitrary deanonymization by a TTP},
  www_section = {privacy, revocation, user misbehavior},
  isbn = {978-1-59593-703-2},
  doi = {10.1145/1315245.1315256},
  url = {http://portal.acm.org/citation.cfm?id=1315245.1315256},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs07-blac.pdf},
}
ccs07-doa
@inproceedings{ccs07-doa,
  title = {Denial of Service or Denial of Security? How Attacks on Reliability can
        Compromise Anonymity},
  author = {Borisov, Nikita and George Danezis and Prateek Mittal and Parisa Tabriz},
  booktitle = {Proceedings of CCS 2007},
  organization = {ACM New York, NY, USA},
  year = {2007},
  month = oct,
  publisher = {ACM New York, NY, USA},
  abstract = {We consider the effect attackers who disrupt anonymous communications have on
        the security of traditional high- and low-latency anonymous communication
        systems, as well as on the Hydra-Onion and Cashmere systems that aim to offer
        reliable mixing, and Salsa, a peer-to-peer anonymous communication network. We
        show that denial of service (DoS) lowers anonymity as messages need to get
        retransmitted to be delivered, presenting more opportunities for attack. We
        uncover a fundamental limit on the security of mix networks, showing that they
        cannot tolerate a majority of nodes being malicious. Cashmere, Hydra-Onion, and
        Salsa security is also badly affected by DoS attackers. Our results are backed by
        probabilistic modeling and extensive simulations and are of direct applicability
        to deployed anonymity systems},
  www_section = {anonymity, attack, denial-of-service, reliability},
  isbn = {978-1-59593-703-2},
  doi = {10.1145/1315245.1315258},
  url = {http://portal.acm.org/citation.cfm?id=1315258},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs07-doa.pdf},
}
ccs09-torsk
@inproceedings{ccs09-torsk,
  title = {Scalable onion routing with Torsk},
  author = {Jon McLachlan and Andrew Tran and Nicholas J. Hopper and Yongdae Kim},
  booktitle = {Proceedings of CCS 2009},
  organization = {ACM New York, NY, USA},
  year = {2009},
  month = nov,
  publisher = {ACM New York, NY, USA},
  abstract = {We introduce Torsk, a structured peer-to-peer low-latency anonymity protocol.
        Torsk is designed as an interoperable replacement for the relay selection and
        directory service of the popular Tor anonymity network, that decreases the
        bandwidth cost of relay selection and maintenance from quadratic to quasilinear
        while introducing no new attacks on the anonymity provided by Tor, and no
        additional delay to connections made via Tor. The resulting bandwidth savings
        make a modest-sized Torsk network significantly cheaper to operate, and allows
        low-bandwidth clients to join the network. Unlike previous proposals for P2P
        anonymity schemes, Torsk does not require all users to relay traffic for others.
        Torsk utilizes a combination of two P2P lookup mechanisms with complementary
        strengths in order to avoid attacks on the confidentiality and integrity of
        lookups. We show by analysis that previously known attacks on P2P anonymity
        schemes do not apply to Torsk, and report on experiments conducted with a
        336-node wide-area deployment of Torsk, demonstrating its efficiency and
        feasibility},
  www_section = {P2P},
  isbn = {978-1-60558-894-0},
  doi = {10.1145/1653662.1653733},
  url = {http://portal.acm.org/citation.cfm?id=1653662.1653733},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs09-torsk.pdf},
}
ccs2008:mittal
@inproceedings{ccs2008:mittal,
  title = {Information Leaks in Structured Peer-to-peer Anonymous Communication Systems},
  author = {Prateek Mittal and Borisov, Nikita},
  booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications
        Security (CCS 2008)},
  organization = {ACM Press},
  year = {2008},
  month = oct,
  address = {Alexandria, Virginia, USA},
  pages = {267--278},
  editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang},
  publisher = {ACM Press},
  abstract = {We analyze information leaks in the lookup mechanisms of structured
        peer-to-peer anonymous communication systems and how these leaks can be used to
        compromise anonymity. We show that the techniques that are used to combat active
        attacks on the lookup mechanism dramatically increase information leaks and
        increase the efficacy of passive attacks. Thus there is a trade-off between
        robustness to active and passive attacks. We study this trade-off in two P2P
        anonymous systems, Salsa and AP3. In both cases, we find that, by combining both
        passive and active attacks, anonymity can be compromised much more effectively
        than previously thought, rendering these systems insecure for most proposed uses.
        Our results hold even if security parameters are changed or other improvements to
        the systems are considered. Our study therefore motivates the search for new
        approaches to P2P anonymous communication},
  www_section = {anonymity, attack, information leaks, P2P},
  isbn = {978-1-59593-810-7},
  doi = {10.1145/1455770.1455805},
  url = {http://portal.acm.org/citation.cfm?id=1455805},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccs2008-mittal.pdf},
}
ccs2008:tsang
@inproceedings{ccs2008:tsang,
  title = {PEREA: Towards Practical TTP-Free Revocation in Anonymous Authentication},
  author = {Patrick P. Tsang and Man Ho Au and Apu Kapadia and Sean Smith},
  booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications
        Security (CCS 2008)},
  organization = {ACM Press},
  year = {2008},
  month = oct,
  address = {Alexandria, Virginia, USA},
  pages = {333--345},
  editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang},
  publisher = {ACM Press},
  www_section = {non-membership proofs, subjective blacklisting},
  isbn = {978-1-59593-810-7},
  doi = {10.1145/1455770.1455813},
  url = {http://portal.acm.org/citation.cfm?id=1455813},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/perea-ccs08.pdf},
}
ccs2008:wang
@inproceedings{ccs2008:wang,
  title = {Dependent Link Padding Algorithms for Low Latency Anonymity Systems},
  author = {Wei Wang and Mehul Motani and Vikram Srinivasan},
  booktitle = {Proceedings of the 15th ACM Conference on Computer and Communications
        Security (CCS 2008)},
  organization = {ACM Press},
  year = {2008},
  month = oct,
  address = {Alexandria, Virginia, USA},
  pages = {323--332},
  editor = {Paul Syverson and Somesh Jha and Xiaolan Zhang},
  publisher = {ACM Press},
  abstract = {Low latency anonymity systems are susceptive to traffic analysis attacks. In
        this paper, we propose a dependent link padding scheme to protect anonymity
        systems from traffic analysis attacks while providing a strict delay bound. The
        covering traffic generated by our scheme uses the minimum sending rate to provide
        full anonymity for a given set of flows. The relationship between user anonymity
        and the minimum covering traffic rate is then studied via analysis and
        simulation. When user flows are Poisson processes with the same sending rate, the
        minimum covering traffic rate to provide full anonymity to m users is O(log m).
        For Pareto traffic, we show that the rate of the covering traffic converges to a
        constant when the number of flows goes to infinity. Finally, we use real Internet
        trace files to study the behavior of our algorithm when user flows have different
        rates},
  www_section = {anonymity service, link padding, traffic analysis},
  isbn = {978-1-59593-810-7},
  doi = {10.1145/1455770.1455812},
  url = {http://portal.acm.org/citation.cfm?id=1455812},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Dependent\%20Link\%20Padding\%20Algorithms\%20for.pdf},
}
ccs2011-cirripede
@inproceedings{ccs2011-cirripede,
  title = {Cirripede: Circumvention Infrastructure using Router Redirection with Plausible
        Deniability},
  author = {Amir Houmansadr and Giang T. K. Nguyen and Matthew Caesar and Borisov, Nikita},
  booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and
        Communications Security},
  organization = {ACM},
  year = {2011},
  month = oct,
  address = {Chicago, IL, United States},
  publisher = {ACM},
  abstract = {Many users face surveillance of their Internet communications and a
        significant fraction suffer from outright blocking of certain destinations.
        Anonymous communication systems allow users to conceal the destinations they
        communicate with, but do not hide the fact that the users are using them. The
        mere use of such systems may invite suspicion, or access to them may be blocked.
        We therefore propose Cirripede, a system that can be used for unobservable
        communication with Internet destinations. Cirripede is designed to be deployed by
        ISPs; it intercepts connections from clients to innocent-looking destinations and
        redirects them to the true destination requested by the client. The communication
        is encoded in a way that is indistinguishable from normal communications to
        anyone without the master secret key, while public-key cryptography is used to
        eliminate the need for any secret information that must be shared with Cirripede
        users. Cirripede is designed to work scalably with routers that handle large
        volumes of traffic while imposing minimal overhead on ISPs and not disrupting
        existing traffic. This allows Cirripede proxies to be strategically deployed at
        central locations, making access to Cirripede very difficult to block. We built a
        proof-of-concept implementation of Cirripede and performed a testbed evaluation
        of its performance properties},
  www_section = {censorship-resistance, unobservability},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Cirripede.pdf},
  url = {https://bibliography.gnunet.org},
}
ccs2011-stealthy
@inproceedings{ccs2011-stealthy,
  title = {Stealthy Traffic Analysis of Low-Latency Anonymous Communication Using
        Throughput Fingerprinting},
  author = {Prateek Mittal and Ahmed Khurshid and Joshua Juen and Matthew Caesar and
        Borisov, Nikita},
  booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and
        Communications Security},
  organization = {ACM},
  year = {2011},
  month = oct,
  address = {Chicago, IL, United States},
  publisher = {ACM},
  abstract = {Anonymity systems such as Tor aim to enable users to communicate in a manner
        that is untraceable by adversaries that control a small number of machines. To
        provide efficient service to users, these anonymity systems make full use of
        forwarding capacity when sending traffic between intermediate relays. In this
        paper, we show that doing this leaks information about the set of Tor relays in a
        circuit (path). We present attacks that, with high confidence and based solely on
        throughput information, can (a) reduce the attacker's uncertainty about the
        bottleneck relay of any Tor circuit whose throughput can be observed, (b) exactly
        identify the guard relay(s) of a Tor user when circuit throughput can be observed
        over multiple connections, and (c) identify whether two concurrent TCP
        connections belong to the same Tor user, breaking unlinkability. Our attacks are
        stealthy, and cannot be readily detected by a user or by Tor relays. We validate
        our attacks using experiments over the live Tor network. We find that the
        attacker can substantially reduce the entropy of a bottleneck relay distribution
        of a Tor circuit whose throughput can be observed{\textemdash}the entropy gets
        reduced by a factor of 2 in the median case. Such information leaks from a single
        Tor circuit can be combined over multiple connections to exactly identify a
        user's guard relay(s). Finally, we are also able to link two connections from the
        same initiator with a crossover error rate of less than 1.5\% in under 5 minutes.
        Our attacks are also more accurate and require fewer resources than previous
        attacks on Tor},
  www_section = {anonymity, attacks, throughput},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Throughput-fingerprinting.pdf},
  url = {https://bibliography.gnunet.org},
}
ccs2011-trust
@inproceedings{ccs2011-trust,
  title = {Trust-based Anonymous Communication: Adversary Models and Routing Algorithms},
  author = {Aaron Johnson and Paul Syverson and Roger Dingledine and Nick Mathewson},
  booktitle = {CCS'11--Proceedings of the 18th ACM conference on Computer and
        Communications Security},
  organization = {ACM},
  year = {2011},
  month = oct,
  address = {Chicago, IL, United States},
  publisher = {ACM},
  abstract = {We introduce a novel model of routing security that incorporates the
        ordinarily overlooked variations in trust that users have for different parts of
        the network. We focus on anonymous communication, and in particular onion
        routing, although we expect the approach to apply more broadly. This paper
        provides two main contributions. First, we present a novel model to consider the
        various security concerns for route selection in anonymity networks when users
        vary their trust over parts of the network. Second, to show the usefulness of our
        model, we present as an example a new algorithm to select paths in onion routing.
        We analyze its effectiveness against deanonymization and other information leaks,
        and particularly how it fares in our model versus existing algorithms, which do
        not consider trust. In contrast to those, we find that our trust-based routing
        strategy can protect anonymity against an adversary capable of attacking a
        significant fraction of the network},
  www_section = {anonymous communication, onion routing, privacy, trust},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CCS\%2711\%20-\%20Trust-based\%20Anonymous\%20Communication1.pdf},
  url = {https://bibliography.gnunet.org},
}
ccsw09-fingerprinting
@inproceedings{ccsw09-fingerprinting,
  title = {Website fingerprinting: attacking popular privacy enhancing technologies with
        the multinomial naive-bayes classifier},
  author = {Dominik Herrmann and Rolf Wendolsky and Hannes Federrath},
  booktitle = {Proceedings of the 2009 ACM workshop on Cloud computing security (CCSW
        '09)},
  organization = {ACM},
  year = {2009},
  address = {New York, NY, USA},
  pages = {31--42},
  publisher = {ACM},
  abstract = {Privacy enhancing technologies like OpenSSL, OpenVPN or Tor establish an
        encrypted tunnel that enables users to hide content and addresses of requested
        websites from external observers This protection is endangered by local traffic
        analysis attacks that allow an external, passive attacker between the PET system
        and the user to uncover the identity of the requested sites. However, existing
        proposals for such attacks are not practicable yet. We present a novel method
        that applies common text mining techniques to the normalised frequency
        distribution of observable IP packet sizes. Our classifier correctly identifies
        up to 97\% of requests on a sample of 775 sites and over 300,000 real-world
        traffic dumps recorded over a two-month period. It outperforms previously known
        methods like Jaccard's classifier and Na{\"\i}ve Bayes that neglect packet
        frequencies altogether or rely on absolute frequency values, respectively. Our
        method is system-agnostic: it can be used against any PET without alteration.
        Closed-world results indicate that many popular single-hop and even multi-hop
        systems like Tor and JonDonym are vulnerable against this general fingerprinting
        attack. Furthermore, we discuss important real-world issues, namely false alarms
        and the influence of the browser cache on accuracy},
  www_section = {forensics, latency, text mining, traffic analysis},
  isbn = {978-1-60558-784-4},
  doi = {10.1145/1655008.1655013},
  url = {http://portal.acm.org/citation.cfm?id=1655013\&dl=GUIDE\&coll=GUIDE\&CFID=83763210\&CFTOKEN=75697565},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ccsw09-fingerprinting.pdf},
}
cebolla
@inproceedings{cebolla,
  title = {Cebolla: Pragmatic IP Anonymity},
  author = {Zach Brown},
  booktitle = {Proceedings of the 2002 Ottawa Linux Symposium},
  year = {2002},
  month = jun,
  abstract = {Cebolla is an intersection of cryptographic mix networks and the environment
        of the public Internet. Most of the history of cryptographic mix networks lies in
        academic attempts to provide anonymity of various sorts to the users of the
        network. While based on strong cryptographic principles, most attempts have
        failed to address properties of the public network and the reasonable
        expectations of most of its users. Cebolla attempts to address this gulf between
        the interesting research aspects of IP level anonymity and the operational
        expectations of most uses of the IP network},
  www_section = {anonymity, cryptography},
  url = {http://www.linuxinsight.com/ols2002_cebolla_pragmatic_ip_anonymity.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cebolla.pdf},
}
chaffinch
@inproceedings{chaffinch,
  title = {Chaffinch: Confidentiality in the Face of Legal Threats},
  author = {Richard Clayton and George Danezis},
  booktitle = {Proceedings of Information Hiding Workshop (IH 2002)},
  organization = {Springer-Verlag, LNCS 2578},
  year = {2002},
  month = oct,
  editor = {Fabien Petitcolas},
  publisher = {Springer-Verlag, LNCS 2578},
  abstract = {We present the design and rationale of a practical system for passing
        confidential messages. The mechanism is an adaptation of Rivest's
        {\textquotedblleft}chaffing and winnowing{\textquotedblright}, which has the
        legal advantage of using authentication keys to provide privacy.We identify a
        weakness in Rivest's particular choice of his {\textquotedblleft}package
        transform{\textquotedblright} as an
        {\textquotedblleft}all-or-nothing{\textquotedblright} element within his scheme.
        We extend the basic system to allow the passing of several messages concurrently.
        Only some of these messages need be divulged under legal duress, the other
        messages will be plausibly deniable. We show how this system may have some
        resilience to the type of legal attack inherent in the UK's Regulation of
        Investigatory Powers (RIP) Act},
  www_section = {legal attack, RIP},
  doi = {10.1007/3-540-36415-3},
  url = {http://portal.acm.org/citation.cfm?id=647598.732024},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Chaffinch.pdf},
}
chatzikokolakis2006apn
@inproceedings{chatzikokolakis2006apn,
  title = {Anonymity Protocols as Noisy Channels?},
  author = {Konstantinos Chatzikokolakis and Catuscia Palamidessi and Prakash Panangaden},
  booktitle = {Proc. 2nd Symposium on Trustworthy Global Computing},
  series = {Lecture Notes in Computer Science},
  volume = {4661},
  publisher = {Springer},
  year = {2006},
  pages = {281--300},
  abstract = {We propose a framework in which anonymity protocols are interpreted as
        particular kinds of channels, and the degree of anonymity provided by the
        protocol as the converse of the channel's capacity. We also investigate how the
        adversary can test the system to try to infer the user's identity, and we study
        how his probability of success depends on the characteristics of the channel. We
        then illustrate how various notions of anonymity can be expressed in this
        framework, and show the relation with some definitions of probabilistic anonymity
        in literature. This work has been partially supported by the INRIA DREI
        {\'E}quipe Associ{\'e}e PRINTEMPS. The work of Konstantinos Chatzikokolakis and
        Catuscia Palamidessi has been also supported by the INRIA ARC project ProNoBiS},
  www_section = {anonymity},
  isbn = {978-3-540-75333-9},
  issn = {0302-9743},
  doi = {10.1007/978-3-540-75336-0},
  url = {http://www.springerlink.com/content/04247873k1719274/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.79.4460.pdf},
}
chaum-dc
@article{chaum-dc,
  title       = {The Dining Cryptographers Problem: Unconditional Sender and Recipient
        Untraceability},
  author      = {David Chaum},
  journal     = {Journal of Cryptology},
  volume      = {1},
  year        = {1988},
  pages       = {65--75},
  abstract    = {Keeping confidential who sends which messages, in a world where any physical
        transmission can be traced to its origin, seems impossible. The solution
        presented here is unconditionally or cryptographically secure, depending on
        whether it is based on one-time-use keys or on public keys, respectively. It can
        be adapted to address efficiently a wide variety of practical considerations},
  www_section = {pseudonym, unconditional security, untraceability},
  issn        = {0933-2790},
  url         = {http://portal.acm.org/citation.cfm?id=54239},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dcnet-jcrypt88.pdf},
}
chaum-mix
@article{chaum-mix,
  title = {Untraceable electronic mail, return addresses, and digital pseudonyms}, 
  author = {David Chaum}, 
  journal = {Communications of the ACM}, 
  volume = {24}, 
  number = {2}, 
  year = {1981}, 
  month = feb, 
  pages = {84--90}, 
  abstract = {A technique based on public key cryptography is presented that allows an
        electronic mail system to hide who a participant communicates with as well as the
        content of the communication--in spite of an unsecured underlying
        telecommunication system. The technique does not require a universally trusted
        authority. One correspondent can remain anonymous to a second, while allowing the
        second to respond via an untraceable return address. The technique can also be
        used to form rosters of untraceable digital pseudonyms from selected
        applications. Applicants retain the exclusive ability to form digital signatures
        corresponding to their pseudonyms. Elections in which any interested party can
        verify that the ballots have been properly counted are possible if anonymously
        mailed ballots are signed with pseudonyms from a roster of registered voters.
        Another use allows an individual to correspond with a record-keeping organization
        under a unique pseudonym which appears in a roster of acceptable clients}, 
  www_section = {digital signature, electronic mail, privacy, pseudonym, public key
        cryptography, traffic analysis}, 
  issn = {0001-0782}, 
  doi = {10.1145/358549.358563}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chaum-mix_0.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
chaum85
@article{chaum85,
  title = {Security without Identification: Transaction Systems to Make Big Brother
        Obsolete}, 
  author = {David Chaum}, 
  journal = {Communications of the ACM}, 
  volume = {28}, 
  number = {10}, 
  year = {1985}, 
  month = oct, 
  pages = {1030--1044}, 
  abstract = {The large-scale automated transaction systems of the near future can be
        designed to protect the privacy and maintain the security of both individuals and
        organizations}, 
  issn = {0001-0782}, 
  doi = {10.1145/4372.4373}, 
  url = {http://portal.acm.org/citation.cfm?id=4373}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.48.4680.pdf}, 
  www_section = {Unsorted}, 
}
cheap-pseudonyms
@article{cheap-pseudonyms,
  title = {The social cost of cheap pseudonyms}, 
  author = {Friedman, Eric and Resnick, Paul}, 
  journal = {Journal of Economics and Management Strategy}, 
  volume = {10}, 
  number = {2}, 
  year = {2001}, 
  pages = {173--199}, 
  abstract = {We consider the problems of societal norms for cooperation and reputation
        when it is possible to obtain cheap pseudonyms, something that is becoming quite
        common in a wide variety of interactions on the Internet. This introduces
        opportunities to misbehave without paying reputational consequences. A large
        degree of cooperation can still emerge, through a convention in which newcomers
        "pay their dues" by accepting poor treatment from players who have established
        positive reputations. One might hope for an open society where newcomers are
        treated well, but there is an inherent social cost in making the spread of
        reputations optional. We prove that no equilibrium can sustain significantly more
        cooperation than the dues-paying equilibrium in a repeated random matching game
        with a large number of players in which players have finite lives and the ability
        to change their identities, and there is a small but nonvanishing probability of
        mistakes. Although one could remove the inefficiency of mistreating newcomers by
        disallowing anonymity, this is not practical or desirable in a wide variety of
        transactions. We discuss the use of entry fees, which permits newcomers to be
        trusted but excludes some players with low payoffs, thus introducing a different
        inefficiency. We also discuss the use of free but unreplaceable pseudonyms, and
        describe a mechanism that implements them using standard encryption techniques,
        which could be practically implemented in electronic transactions}, 
  www_section = {pseudonym}, 
  doi = {10.1111/j.1430-9134.2001.00173.x}, 
  url = {http://www3.interscience.wiley.com/journal/119023370/abstract?CRETRY=1\&SRETRY=0},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/081199.pdf}, 
}
chl05-full:eurocrypt2005
@inproceedings{chl05-full:eurocrypt2005,
  title = {Compact E-Cash}, 
  author = {Jan Camenisch and Susan Hohenberger and Anna Lysyanskaya}, 
  booktitle = {Proceedings of EUROCRYPT 2005}, 
  organization = {Springer}, 
  volume = {3494}, 
  year = {2005}, 
  pages = {302--321}, 
  editor = {Ronald Cramer}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This paper presents efficient off-line anonymous e-cash schemes where a user
        can withdraw a wallet containing 2^l coins each of which she can spend
        unlinkably. Our first result is a scheme, secure under the strong RSA and the
        y-DDHI assumptions, where the complexity of the withdrawal and spend operations
        is O(l+k) and the user's wallet can be stored using O(l+k) bits, where k is a
        security parameter. The best previously known schemes require at least one of
        these complexities to be O(2^l k). In fact, compared to previous e-cash schemes,
        our whole wallet of 2^l coins has about the same size as one coin in these
        schemes. Our scheme also offers exculpability of users, that is, the bank can
        prove to third parties that a user has double-spent. We then extend our scheme to
        our second result, the first e-cash scheme that provides traceable coins without
        a trusted third party. That is, once a user has double spent one of the 2^l coins
        in her wallet, all her spendings of these coins can be traced. We present two
        alternate constructions. One construction shares the same complexities with our
        first result but requires a strong bilinear map assumption that is only
        conjectured to hold on MNT curves. The second construction works on more general
        types of elliptic curves, but the price for this is that the complexity of the
        spending and of the withdrawal protocols becomes O(lk) and O(lk + k^2) bits,
        respectively, and wallets take O(lk) bits of storage. All our schemes are secure
        in the random oracle model}, 
  isbn = {3-540-25910-4}, 
  doi = {10.1007/b136415}, 
  url = {http://www.springerlink.com/content/vwkgkfpdmrdky5a8/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/chl05-full-eurocrypt2005.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
ciaccio:pet2006
@inproceedings{ciaccio:pet2006,
  title = {Improving Sender Anonymity in a Structured Overlay with Imprecise Routing}, 
  author = {Giuseppe Ciaccio}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = jun, 
  address = {Cambridge, UK}, 
  pages = {190--207}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {In the framework of peer to peer distributed systems, the problem of
        anonymity in structured overlay networks remains a quite elusive one. It is
        especially unclear how to evaluate and improve sender anonymity, that is,
        untraceability of the peers who issue messages to other participants in the
        overlay. In a structured overlay organized as a chordal ring, we have found that
        a technique originally developed for recipient anonymity also improves sender
        anonymity. The technique is based on the use of imprecise entries in the routing
        tables of each participating peer. Simulations show that the sender anonymity, as
        measured in terms of average size of anonymity set, decreases slightly if the
        peers use imprecise routing; yet, the anonymity takes a better distribution, with
        good anonymity levels becoming more likely at the expenses of very high and very
        low levels. A better quality of anonymity service is thus provided to
        participants}, 
  www_section = {anonymity, P2P}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/v473127846n07255/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ciaccio-pet2006.pdf}, 
}
citeulike:1360149
@booklet{citeulike:1360149,
  title = {Experiences Deploying a Large-Scale Emergent Network}, 
  author = {O'Hearn, Bryce W.}, 
  year = {2002}, 
  abstract = {Mojo Nation was a network for robust, decentralized file storage and
        transfer}, 
  isbn = {3-540-44179-4}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.59.9607}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Experiences_Deploying_a_Large-Scale_Emergent_Network.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
citeulike:2549551
@booklet{citeulike:2549551,
  author = {Dai, Wei}, 
  title = {PipeNet 1.1}, 
  year = {1998}, 
  url = {http://www.eskimo.com/~weidai/pipenet.txt}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
citeulike:530977
@article{citeulike:530977,
  title = {Taxonomy of trust: Categorizing P2P reputation systems}, 
  author = {Marti, Sergio and Hector Garcia-Molina}, 
  journal = {Computer Networks}, 
  volume = {50}, 
  number = {4}, 
  year = {2006}, 
  month = mar, 
  pages = {472--484}, 
  abstract = {The field of peer-to-peer reputation systems has exploded in the last few
        years. Our goal is to organize existing ideas and work to facilitate system
        design. We present a taxonomy of reputation system components, their properties,
        and discuss how user behavior and technical constraints can conflict. In our
        discussion, we describe research that exemplifies compromises made to deliver a
        useable, implementable system}, 
  www_section = {P2P, trust}, 
  doi = {10.1016/j.comnet.2005.07.011}, 
  url = {http://portal.acm.org/citation.cfm?id=1139713}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Marti-ElsevierScienceSubmitted05_0.pdf},
}
cl01a
@inproceedings{cl01a,
  title = {An Efficient System for Non-transferable Anonymous Credentials with Optional
        Anonymity Revocation}, 
  author = {Jan Camenisch and Anna Lysyanskaya}, 
  booktitle = {Proceedings of the International Conference on the Theory and Application of
        Cryptographic Techniques (EUROCRYPT '01)}, 
  organization = {Springer-Verlag}, 
  year = {2001}, 
  address = {London, UK}, 
  pages = {93--118}, 
  publisher = {Springer-Verlag}, 
  abstract = {A credential system is a system in which users can obtain credentials from
        organizations and demonstrate possession of these credentials. Such a system is
        anonymous when transactions carried out by the same user cannot be linked. An
        anonymous credential system is of significant practical relevance because it is
        the best means of providing privacy for users. In this paper we propose a
        practical anonymous credential system that is based on the strong RSA assumption
        and the decisional Diffie-Hellman assumption modulo a safe prime product and is
        considerably superior to existing ones: 1 We give the first practical solution
        that allows a user to unlinkably demonstrate possession of a credential as many
        times as necessary without involving the issuing organization. 2 To prevent
        misuse of anonymity, our scheme is the first to offer optional anonymity
        revocation for particular transactions. 3 Our scheme offers separability: all
        organizations can choose their cryptographic keys independently of each other.
        Moreover, we suggest more effective means of preventing users from sharing their
        credentials, by introducing all-or-nothing sharing: a user who allows a friend to
        use one of her credentials once, gives him the ability to use all of her
        credentials, i.e., taking over her identity. This is implemented by a new
        primitive, called circular encryption, which is of independent interest, and can
        be realized from any semantically secure cryptosystem in the random oracle
        model}, 
  isbn = {3-540-42070-3}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.63.9429}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cl01a.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
clayton:pet2003
@inproceedings{clayton:pet2003,
  title = {Improving Onion Notation}, 
  author = {Richard Clayton}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = mar, 
  pages = {81--87}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {Several different notations are used in the literature of MIX networks to
        describe the nested encrypted structures now widely known as "onions". The
        shortcomings of these notations are described and a new notation is proposed,
        that as well as having some advantages from a typographical point of view, is
        also far clearer to read and to reason about. The proposed notation generated a
        lively debate at the PET2003 workshop and the various views, and alternative
        proposals, are reported upon. The workshop participants did not reach any
        consensus on improving onion notation, but there is now a heightened awareness of
        the problems that can arise with existing representations}, 
  www_section = {onion routing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.5965}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clayton-pet2003.pdf}, 
}
clayton:pet2006
@inproceedings{clayton:pet2006,
  title = {Ignoring the Great Firewall of China}, 
  author = {Richard Clayton and Steven J. Murdoch and Robert N. M. Watson}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = jun, 
  address = {Cambridge, UK}, 
  pages = {20--35}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {The so-called {\textquotedblleft}Great Firewall of China{\textquotedblright}
        operates, in part, by inspecting TCP packets for keywords that are to be blocked.
        If the keyword is present, TCP reset packets (viz: with the RST flag set) are
        sent to both endpoints of the connection, which then close. However, because the
        original packets are passed through the firewall unscathed, if the endpoints
        completely ignore the firewall's resets, then the connection will proceed
        unhindered. Once one connection has been blocked, the firewall makes further
        easy-to-evade attempts to block further connections from the same machine. This
        latter behaviour can be leveraged into a denial-of-service attack on third-party
        machines}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/7224582654260k03/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clayton-pet2006.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
clog-the-queue
@inproceedings{clog-the-queue,
  title = {Don't Clog the Queue: Circuit Clogging and Mitigation in P2P anonymity schemes}, 
  author = {Jon McLachlan and Nicholas J. Hopper}, 
  booktitle = {Proceedings of Financial Cryptography (FC '08)}, 
  organization = {Springer-Verlag Berlin, Heidelberg}, 
  year = {2008}, 
  month = jan, 
  publisher = {Springer-Verlag Berlin, Heidelberg}, 
  abstract = {At Oakland 2005, Murdoch and Danezis described an attack on the Tor anonymity
        service that recovers the nodes in a Tor circuit, but not the client. We observe
        that in a peer-to-peer anonymity scheme, the client is part of the circuit and
        thus the technique can be of greater significance in this setting. We
        experimentally validate this conclusion by showing that "circuit clogging" can
        identify client nodes using the MorphMix peer-to-peer anonymity protocol. We also
        propose and empirically validate the use of the Stochastic Fair Queueing
        discipline on outgoing connections as an efficient and low-cost mitigation
        technique}, 
  www_section = {anonymity, P2P, Tor}, 
  doi = {10.1007/978-3-540-85230-8_3}, 
  url = {http://portal.acm.org/citation.cfm?id=1428551}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clog-the-queue.pdf}, 
}
clonewars
@inproceedings{clonewars,
  title = {How to win the clonewars: efficient periodic n-times anonymous authentication}, 
  author = {Jan Camenisch and Susan Hohenberger and Markulf Kohlweiss and Anna Lysyanskaya
        and Mira Meyerovich}, 
  booktitle = {Proceedings of the 13th ACM conference on Computer and communications
        security (CCS 2006)}, 
  organization = {ACM Press}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {201--210}, 
  publisher = {ACM Press}, 
  abstract = {We create a credential system that lets a user anonymously authenticate at
        most $n$ times in a single time period. A user withdraws a dispenser of n
        e-tokens. She shows an e-token to a verifier to authenticate herself; each
        e-token can be used only once, however, the dispenser automatically refreshes
        every time period. The only prior solution to this problem, due to Damg{\r a}rd
        et al. [29], uses protocols that are a factor of k slower for the user and
        verifier, where k is the security parameter. Damg{\r a}rd et al. also only
        support one authentication per time period, while we support n. Because our
        construction is based on e-cash, we can use existing techniques to identify a
        cheating user, trace all of her e-tokens, and revoke her dispensers. We also
        offer a new anonymity service: glitch protection for basically honest users who
        (occasionally) reuse e-tokens. The verifier can always recognize a reused
        e-token; however, we preserve the anonymity of users who do not reuse e-tokens
        too often}, 
  www_section = {clone detection, credentials, n-anonymous authentication}, 
  isbn = {1-59593-518-5}, 
  doi = {10.1145/1180405.1180431}, 
  url = {http://portal.acm.org/citation.cfm?id=1180431}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/clonewars.pdf}, 
}
conf/acsac/ADC07
@inproceedings{conf/acsac/ADC07,
  title = {Closed-Circuit Unobservable Voice Over IP}, 
  author = {Carlos Aguilar Melchor and Yves Deswarte and Julien Iguchi-Cartigny}, 
  booktitle = {Proceedings of 23rd Annual Computer Security Applications Conference
        (ACSAC'07), Miami, FL, USA}, 
  organization = {IEEE Computer Society Press}, 
  year = {2007}, 
  publisher = {IEEE Computer Society Press}, 
  abstract = {Among all the security issues in Voice over IP (VoIP) communications, one of
        the most difficult to achieve is traffic analysis resistance. Indeed, classical
        approaches provide a reasonable degree of security but induce large round-trip
        times that are incompatible with VoIP. In this paper, we describe some of the
        privacy and security issues derived from traffic analysis in VoIP. We also give
        an overview of how to provide low-latency VoIP communication with strong
        resistance to traffic analysis. Finally, we present a server which can provide
        such resistance to hundreds of users even if the server is compromised}, 
  www_section = {latency, unobservability, VoIP}, 
  isbn = {0-7695-3060-5}, 
  doi = {10.1109/ACSAC.2007.34}, 
  url = {http://www.computer.org/portal/web/csdl/doi/10.1109/ACSAC.2007.34}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ADC07.pdf}, 
}
conf/infocom/GollapudiSZ05
@inproceedings{conf/infocom/GollapudiSZ05,
  title = {Exploiting anarchy in networks: a game-theoretic approach to combining fairness
        and throughput}, 
  author = {Gollapudi, Sreenivas and Sivakumar, D. and Zhang, Aidong}, 
  booktitle = {INFOCOM 2005. Proceedings of the 24th Annual Joint Conference of the IEEE
        Computer and Communications Societies}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = mar, 
  address = {Miami, FL, USA}, 
  pages = {2147--2158}, 
  publisher = {IEEE Computer Society}, 
  abstract = {We propose a novel mechanism for routing and bandwidth allocation that
        exploits the selfish and rational behavior of flows in a network. Our mechanism
        leads to allocations that simultaneously optimize throughput and fairness
        criteria. We analyze the performance of our mechanism in terms of the induced
        Nash equilibrium. We compare the allocations at the Nash equilibrium with
        throughput-optimal allocations as well as with fairness-optimal allocations. Our
        mechanism offers a smooth trade-off between these criteria, and allows us to
        produce allocations that are approximately optimal with respect to both. Our
        mechanism is also fairly simple and admits an efficient distributed
        implementation}, 
  www_section = {bandwidth allocation, dblp, nash equilibrium, routing allocation}, 
  isbn = {0-7803-8968-9}, 
  doi = {10.1109/INFCOM.2005.1498490}, 
  url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2005.html$\#$GollapudiSZ05}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Exploiting\%20anarchy\%20in\%20networks.pdf},
}
conf/infocom/StutzbachR06
@inproceedings{conf/infocom/StutzbachR06,
  title = {Improving Lookup Performance Over a Widely-Deployed DHT}, 
  author = {Stutzbach, Daniel and Rejaie, Reza}, 
  booktitle = {INFOCOM}, 
  organization = {IEEE}, 
  year = {2006}, 
  publisher = {IEEE}, 
  abstract = {During recent years, Distributed Hash Tables (DHTs) have been extensively
        studied through simulation and analysis. However, due to their limited
        deployment, it has not been possible to observe the behavior of a widely-deployed
        DHT in practice. Recently, the popular eMule file-sharing software incorporated a
        Kademlia-based DHT, called Kad, which currently has around one million
        simultaneous users. In this paper, we empirically study the performance of the
        key DHT operation, lookup, over Kad. First, we analytically derive the benefits
        of different ways to increase the richness of routing tables in Kademlia-based
        DHTs. Second, we empirically characterize two aspects of the accuracy of routing
        tables in Kad, namely completeness and freshness, and characterize their impact
        on Kad's lookup performance. Finally, we investigate how the efficiency and
        consistency of lookup in Kad can be improved by performing parallel lookup and
        maintaining multiple replicas, respectively. Our results pinpoint the best
        operating point for the degree of lookup parallelism and the degree of
        replication for Kad}, 
  www_section = {distributed hash table, redundancy}, 
  url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2006.html$\#$StutzbachR06}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infocom06-kad.pdf}, 
}
conf/infocom/SunHYL06
@inproceedings{conf/infocom/SunHYL06,
  title = {A Trust Evaluation Framework in Distributed Networks: Vulnerability Analysis and
        Defense Against Attacks}, 
  author = {Yan L. Sun and Zhu Han and Wei Yu and K. J. Ray Liu}, 
  booktitle = {INFOCOM}, 
  organization = {IEEE}, 
  year = {2006}, 
  publisher = {IEEE}, 
  abstract = {Evaluation of trustworthiness of participating entities is an effective
        method to stimulate collaboration and improve network security in distributed
        networks. Similar to other security related protocols, trust evaluation is an
        attractive target for adversaries. Currently, the vulnerabilities of trust
        evaluation system have not been well understood. In this paper, we present
        several attacks that can undermine the accuracy of trust evaluation, and then
        develop defense techniques. Based on our investigation on attacks and defense, we
        implement a trust evaluation system in ad hoc networks for securing ad hoc
        routing and assisting malicious node detection. Extensive simulations are
        performed to illustrate various attacks, the effectiveness of the proposed
        defense techniques, and the overall performance of the trust evaluation system}, 
  www_section = {ad-hoc networks}, 
  isbn = {1-4244-0349-9}, 
  doi = {10.1109/CISS.2006.286695}, 
  url = {http://dblp.uni-trier.de/db/conf/infocom/infocom2006.html$\#$SunHYL06}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Trust_infocom06_v4.pdf},
}
conf/infocom/WangLX08
@inproceedings{conf/infocom/WangLX08,
  title = {Stable Peers: Existence, Importance, and Application in Peer-to-Peer Live Video
        Streaming}, 
  author = {Wang, Feng and Liu, Jiangchuan and Xiong, Yongqiang}, 
  booktitle = {INFOCOM'08. Proceedings of the 27th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2008}, 
  month = apr, 
  address = {Phoenix, AZ, USA}, 
  pages = {1364--1372}, 
  publisher = {IEEE Computer Society}, 
  abstract = {This paper presents a systematic in-depth study on the existence, importance,
        and application of stable nodes in peer- to-peer live video streaming. Using
        traces from a real large-scale system as well as analytical models, we show that,
        while the number of stable nodes is small throughout a whole session, their
        longer lifespans make them constitute a significant portion in a per-snapshot
        view of a peer-to-peer overlay. As a result, they have substantially affected the
        performance of the overall system. Inspired by this, we propose a tiered overlay
        design, with stable nodes being organized into a tier-1 backbone for serving
        tier-2 nodes. It offers a highly cost-effective and deployable alternative to
        proxy-assisted designs. We develop a comprehensive set of algorithms for stable
        node identification and organization. Specifically, we present a novel structure,
        Labeled Tree, for the tier-1 overlay, which, leveraging stable peers,
        simultaneously achieves low overhead and high transmission reliability. Our
        tiered framework flexibly accommodates diverse existing overlay structures in the
        second tier. Our extensive simulation results demonstrated that the customized
        optimization using selected stable nodes boosts the streaming quality and also
        effectively reduces the control overhead. This is further validated through
        prototype experiments over the PlanetLab network}, 
  www_section = {peer-to-peer live video streaming, stable peer}, 
  isbn = {978-1-4244-2025-4}, 
  doi = {10.1109/INFOCOM.2008.194}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2708\%20-\%20Stable\%20peers.PDF},
}
conf/infocom/WuLR09
@inproceedings{conf/infocom/WuLR09,
  title = {Queuing Network Models for Multi-Channel P2P Live Streaming Systems}, 
  author = {Wu, Di and Yong Liu and Keith W. Ross}, 
  booktitle = {INFOCOM'09. Proceedings of the 28th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  month = apr, 
  address = {Rio de Janeiro, Brazil}, 
  pages = {73--81}, 
  publisher = {IEEE Computer Society}, 
  abstract = {In recent years there have been several large-scale deployments of P2P live
        video systems. Existing and future P2P live video systems will offer a large
        number of channels, with users switching frequently among the channels. In this
        paper, we develop infinite-server queueing network models to analytically study
        the performance of multi-channel P2P streaming systems. Our models capture
        essential aspects of multi-channel video systems, including peer channel
        switching, peer churn, peer bandwidth heterogeneity, and Zipf-like channel
        popularity. We apply the queueing network models to two P2P streaming designs:
        the isolated channel design (ISO) and the View-Upload Decoupling (VUD) design.
        For both of these designs, we develop efficient algorithms to calculate critical
        performance measures, develop an asymptotic theory to provide closed-form results
        when the number of peers approaches infinity, and derive near- optimal
        provisioning rules for assigning peers to groups in VUD. We use the analytical
        results to compare VUD with ISO. We show that VUD design generally performs
        significantly better, particularly for systems with heterogeneous channel
        popularities and streaming rates}, 
  www_section = {dblp, multi-channel, p2p streaming system}, 
  isbn = {978-1-4244-3512-8}, 
  doi = {10.1109/INFCOM.2009.5061908}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2709\%20-\%20Queusing\%20models\%20for\%20p2p\%20streaming\%20systems.pdf},
}
conf/ndss/BackesMP11
@inproceedings{conf/ndss/BackesMP11,
  title = {A Security API for Distributed Social Networks}, 
  author = {Michael Backes and Maffei, Matteo and Pecina, Kim}, 
  booktitle = {NDSS'11--Proceedings of the Network and Distributed Security Symposium}, 
  organization = {The Internet Society}, 
  year = {2011}, 
  month = feb, 
  address = {San Diego, CA, USA}, 
  publisher = {The Internet Society}, 
  abstract = {We present a cryptographic framework to achieve access control, privacy of
        social relations, secrecy of resources, and anonymity of users in social
        networks. We illustrate our technique on a core API for social networking, which
        includes methods for establishing social relations and for sharing resources. The
        cryptographic protocols implementing these methods use pseudonyms to hide user
        identities, signatures on these pseudonyms to establish social relations, and
        zero-knowledge proofs of knowledge of such signatures to demonstrate the
        existence of social relations without sacrificing user anonymity. As we do not
        put any constraints on the underlying social network, our framework is generally
        applicable and, in particular, constitutes an ideal plug-in for decentralized
        social networks. We analyzed the security of our protocols by developing formal
        definitions of the aforementioned security properties and by verifying them using
        ProVerif, an automated theorem prover for cryptographic protocols. Finally, we
        built a prototypical implementation and conducted an experimental evaluation to
        demonstrate the efficiency and the scalability of our framework}, 
  www_section = {API, online-social-networks, security}, 
  url = {http://www.lbs.cs.uni-saarland.de/publications/sapi.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NDSS\%2711\%20-\%20Security\%20API\%20for\%20Distributed\%20Social\%20Networks.pdf},
}
conf/p2p/GotzRW05
@incollection{conf/p2p/GotzRW05,
  title = {Selected {DHT} Algorithms},
  author = {G{\"o}tz, Stefan and Rieche, Simon and Wehrle, Klaus},
  booktitle = {Peer-to-Peer Systems and Applications},
  volume = {3485},
  year = {2005},
  chapter = {8},
  pages = {95--117},
  publisher = {Springer},
  series = {Lecture Notes in Computer Science},
  abstract = {Several different approaches to realizing the basic principles of DHTs have
        emerged over the last few years. Although they rely on the same fundamental idea,
        there is a large diversity of methods for both organizing the identifier space
        and performing routing. The particular properties of each approach can thus be
        exploited by specific application scenarios and requirements. This overview
        focuses on the three DHT systems that have received the most attention in the
        research community: Chord, Pastry, and Content Addressable Networks (CAN).
        Furthermore, the systems Symphony, Viceroy, and Kademlia are discussed because
        they exhibit interesting mechanisms and properties beyond those of the first
        three systems},
  www_section = {CAN, Chord, Content Addressable Networks, dblp, distributed hash table,
        Kademlia, Pastry, Symphony, Viceroy},
  isbn = {3-540-29192-X},
  url = {http://dblp.uni-trier.de/db/conf/p2p/p2p2005lncs.html\#GotzRW05},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Lecture\%20Notes\%20in\%20Computer\%20Science\%20-\%20Selected\%20DHT\%20Algorithms\%20.pdf},
}
congestion-tor12
@inproceedings{congestion-tor12,
  title = {Congestion-aware Path Selection for {Tor}},
  author = {Wang, Tao and Bauer, Kevin and Forero, Clara and Goldberg, Ian},
  booktitle = {FC'12--Proceedings of the 16th International Conference in Financial
        Cryptography and Data Security},
  year = {2012},
  month = feb,
  address = {Bonaire},
  abstract = {Tor, an anonymity network formed by volunteer nodes, uses the estimated
        bandwidth of the nodes as a central feature of its path selection algorithm. The
        current load on nodes is not considered in this algorithm, however, and we
        observe that some nodes persist in being under-utilized or congested. This can
        degrade the network's performance, discourage Tor adoption, and consequently
        reduce the size of Tor's anonymity set. In an effort to reduce congestion and
        improve load balancing, we propose a congestion-aware path selection algorithm.
        Using latency as an indicator of congestion, clients use opportunistic and
        lightweight active measurements to evaluate the congestion state of nodes, and
        reject nodes that appear congested. Through experiments conducted on the live Tor
        network, we verify our hypothesis that clients can infer congestion using latency
        and show that congestion-aware path selection can improve performance},
  www_section = {algorithms, Tor, volunteer nodes},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2712\%20-\%20Congestion-aware\%20Path\%20Selection\%20for\%20Tor.pdf},
  url = {https://bibliography.gnunet.org},
}
consensus2016
@inproceedings{consensus2016,
  title = {{Byzantine} Set-Union Consensus using Efficient Set Reconciliation},
  author = {Dold, Florian and Grothoff, Christian},
  booktitle = {International Conference on Availability, Reliability and Security (ARES)},
  year = {2016},
  month = jun,
  abstract = {Applications of secure multiparty computation such as certain electronic
        voting or auction protocols require Byzantine agreement on large sets of
        elements. Implementations proposed in the literature so far have relied on state
        machine replication, and reach agreement on each individual set element in
        sequence. We introduce set-union consensus, a specialization of Byzantine
        consensus that reaches agreement over whole sets. This primitive admits an
        efficient and simple implementation by the composition of Eppstein's set
        reconciliation protocol with Ben-Or's ByzConsensus protocol. A free software
        implementation of this construction is available in GNUnet. Experimental results
        indicate that our approach results in an efficient protocol for very large sets,
        especially in the absence of Byzantine faults. We show the versatility of
        set-union consensus by using it to implement distributed key generation, ballot
        collection and cooperative decryption for an electronic voting protocol
        implemented in GNUnet},
  www_section = {byzantine fault tolerance, consensus, GNUnet},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/consensus2016.pdf},
  www_tags = {selected},
  url = {https://bibliography.gnunet.org},
}
continual
@inproceedings{continual,
  title = {Differential Privacy Under Continual Observation},
  author = {Dwork, Cynthia and Naor, Moni and Pitassi, Toniann and Rothblum, Guy N.},
  booktitle = {Proceedings of the 42nd ACM Symposium on Theory of Computing (STOC'10)},
  year = {2010},
  month = jun,
  pages = {715--724},
  url = {https://bibliography.gnunet.org},
  www_section = {Unsorted},
}
cooper
@inproceedings{cooper,
  title = {Preserving Privacy in a Network of Mobile Computers},
  author = {Cooper, David A. and Birman, Kenneth P.},
  booktitle = {Proceedings of the 1995 IEEE Symposium on Security and Privacy},
  organization = {IEEE Computer Society},
  year = {1995},
  month = may,
  publisher = {IEEE Computer Society},
  abstract = {Even as wireless networks create the potential for access to information from
        mobile platforms, they pose a problem for privacy. In order to retrieve messages,
        users must periodically poll the network. The information that the user must give
        to the network could potentially be used to track that user. However, the
        movements of the user can also be used to hide the user's location if the
        protocols for sending and retrieving messages are carefully designed. We have
        developed a replicated memory service which allows users to read from memory
        without revealing which memory locations they are reading. Unlike previous
        protocols, our protocol is efficient in its use of computation and bandwidth. We
        show how this protocol can be used in conjunction with existing privacy
        preserving protocols to allow a user of a mobile computer to maintain privacy
        despite active attacks},
  url = {http://portal.acm.org/citation.cfm?id=882491.884247},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cooper.pdf},
  www_section = {Unsorted},
}
cosic-2007-001
@techreport{cosic-2007-001,
  title = {The {Byzantine} Postman Problem: A Trivial Attack Against {PIR}-based Nym Servers},
  author = {Sassaman, Len and Preneel, Bart},
  institution = {Katholieke Universiteit Leuven},
  number = {ESAT-COSIC 2007-001},
  year = {2007},
  month = feb,
  abstract = {Over the last several decades, there have been numerous proposals for systems
        which can preserve the anonymity of the recipient of some data. Some have
        involved trusted third-parties or trusted hardware; others have been constructed
        on top of link-layer anonymity systems or mix-nets. In this paper, we evaluate a
        pseudonymous message system which takes the different approach of using Private
        Information Retrieval (PIR) as its basis. We expose a flaw in the system as
        presented: it fails to identify Byzantine servers. We provide suggestions on
        correcting the flaw, while observing the security and performance trade-offs our
        suggestions require},
  www_section = {anonymity, private information retrieval, pseudonym},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.1013},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cosic-2007-001.pdf},
}
cramer05isprp
@inproceedings{cramer05isprp,
  title = {{ISPRP}: A Message-Efficient Protocol for Initializing Structured {P2P} Networks},
  author = {Cramer, Curt and Fuhrmann, Thomas},
  booktitle = {Proceedings of the 24th IEEE International Performance, Computing, and
        Communications Conference (IPCCC)},
  year = {2005},
  address = {Phoenix, AZ},
  pages = {365--370},
  abstract = {Most research activities in the field of peer-to-peer (P2P) computing are
        concerned with routing in virtualized overlay networks. These overlays generally
        assume node connectivity to be provided by an underlying network-layer routing
        protocol. This duplication of functionality can give rise to severe
        inefficiencies. In contrast, we suggest a cross-layer approach where the P2P
        overlay network also provides the required network-layer routing functionality by
        itself. Especially in sensor networks, where special attention has to be paid to
        the nodes' limited capabilities, this can greatly help in reducing the message
        overhead. In this paper, we present a key building block for such a protocol, the
        iterative successor pointer rewiring protocol (ISPRP), which efficiently
        initializes a P2P routing network among a freshly deployed set of nodes having
        but link-layer connectivity. ISPRP works in a fully self-organizing way and
        issues only a small per-node amount of messages by keeping interactions between
        nodes as local as possible},
  www_section = {P2P},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05isprp.pdf},
}
cramer05pns
@inproceedings{cramer05pns,
  title = {Proximity Neighbor Selection for a {DHT} in Wireless Multi-Hop Networks},
  author = {Cramer, Curt and Fuhrmann, Thomas},
  booktitle = {Proceedings of the 5th IEEE International Conference on Peer-to-Peer
        Computing},
  organization = {IEEE Computer Society Washington, DC, USA},
  year = {2005},
  address = {Konstanz, Germany},
  pages = {3--10},
  publisher = {IEEE Computer Society Washington, DC, USA},
  abstract = {A mobile ad hoc network (MANET) is a multi-hop wireless network having no
        infrastructure. Thus, the mobile nodes have to perform basic control tasks, such
        as routing, and higher-level tasks, such as service discovery, in a cooperative
        and distributed way. Originally conceived as a peer-to-peer application for the
        Internet, distributed hash tables (DHTs) are data structures offering both,
        scalable routing and a convenient abstraction for the design of applications in
        large, dynamic networks. Hence, DHTs and MANETs seem to be a good match, and both
        have to cope with dynamic, self-organizing networks. DHTs form a virtual control
        structure oblivious to the underlying network. Several techniques to improve the
        performance of DHTs in wired networks have been established in the literature. A
        particularly efficient one is proximity neighbor selection (PNS). PNS has to
        continuously adapt the virtual network to the physical network, incurring control
        traffic. The applicability of PNS and DHTs for MANETs commonly is regarded as
        hard because of this control traffic,the complexity of the adaptation algorithms,
        and the dynamics of a MANET. Using simulations supported by analytical methods,
        we show that by making a minor addition to PNS, it is also applicable for MANETs.
        We additionally show that the specifics of a MANET make PNS an easy exercise
        there. Thus, DHTs deliver good performance in MANETs},
  www_section = {distributed hash table, multi-hop networks, proximity neighbor selection},
  isbn = {0-7695-2376-5},
  doi = {10.1109/P2P.2005.28},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05pns.pdf},
}
cramer05selfstabilizing
@techreport{cramer05selfstabilizing,
  title = {Self-Stabilizing Ring Networks on Connected Graphs},
  author = {Cramer, Curt and Fuhrmann, Thomas},
  institution = {University of Karlsruhe (TH), Fakultaet fuer Informatik},
  number = {2005-5},
  year = {2005},
  type = {Technical Report},
  abstract = {Large networks require scalable routing. Traditionally, protocol overhead is
        reduced by introducing a hierarchy. This requires aggregation of nearby nodes
        under a common address prefix. In fixed networks, this is achieved
        administratively, whereas in wireless ad-hoc networks, dynamic assignments of
        nodes to aggregation units are required. As a result of the nodes commonly being
        assigned a random network address, the majority of proposed ad-hoc routing
        protocols discovers routes between end nodes by flooding, thus limiting the
        network size. Peer-to-peer (P2P) overlay networks offer scalable routing
        solutions by employing virtualized address spaces, yet assume an underlying
        routing protocol for end-to-end connectivity. We investigate a cross-layer
        approach to P2P routing, where the virtual address space is implemented with a
        network-layer routing protocol by itself. The Iterative Successor Pointer
        Rewiring Protocol (ISPRP) efficiently initializes a ring-structured network among
        nodes having but link-layer connectivity. It is fully self-organizing and issues
        only a small per-node amount of messages by keeping interactions between nodes as
        local as possible. The main contribution of this paper is a proof that ISPRP is
        self-stabilizing, that is, starting from an arbitrary initial state, the protocol
        lets the network converge into a correct state within a bounded amount of time},
  www_section = {ad-hoc networks, P2P},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer05selfstabilizing.pdf},
}
cramer06bootstrapping
@inproceedings{cramer06bootstrapping,
  title = {Bootstrapping {Chord} in Ad hoc Networks: Not Going Anywhere for a While},
  author = {Cramer, Curt and Fuhrmann, Thomas},
  booktitle = {Proceedings of the 3rd IEEE International Workshop on Mobile Peer-to-Peer
        Computing},
  year = {2006},
  address = {Pisa, Italy},
  abstract = {With the growing prevalence of wireless devices, infrastructure-less ad hoc
        networking is coming closer to reality. Research in this field has mainly been
        concerned with routing. However, to justify the relevance of ad hoc networks,
        there have to be applications. Distributed applications require basic services
        such as naming. In an ad hoc network, these services have to be provided in a
        decentralized way. We believe that structured peer-to-peer overlays are a good
        basis for their design. Prior work has been focused on the long-run performance
        of virtual peer-to-peer overlays over ad hoc networks. In this paper, we consider
        a vital functionality of any peer-to-peer network: bootstrapping. We formally
        show that the self-configuration process of a spontaneously deployed Chord
        network has a time complexity linear in the network size. In addition to that,
        its centralized bootstrapping procedure causes an unfavorable traffic load
        imbalance},
  www_section = {ad-hoc networks, overlay networks, traffic analysis},
  doi = {10.1109/PERCOMW.2006.28},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cramer06bootstrapping.pdf},
}
crowds-model
@article{crowds-model,
  title = {Probabilistic Model Checking of an Anonymity System},
  author = {Shmatikov, Vitaly},
  journal = {Journal of Computer Security},
  volume = {12},
  number = {3-4},
  year = {2004},
  pages = {355--377},
  abstract = {We use the probabilistic model checker PRISM to analyze the Crowds system for
        anonymous Web browsing. This case study demonstrates how probabilistic model
        checking techniques can be used to formally analyze security properties of a
        peer-to-peer group communication system based on random message routing among
        members. The behavior of group members and the adversary is modeled as a
        discrete-time Markov chain, and the desired security properties are expressed as
        PCTL formulas. The PRISM model checker is used to perform automated analysis of
        the system and verify anonymity guarantees it provides. Our main result is a
        demonstration of how certain forms of probabilistic anonymity degrade when group
        size increases or random routing paths are rebuilt, assuming that the corrupt
        group members are able to identify and/or correlate multiple routing paths
        originating from the same sender},
  www_section = {anonymity, P2P, routing},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.6570},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shmat_crowds.pdf},
}
cryptoeprint:2005:394
@booklet{cryptoeprint:2005:394,
  title = {Obfuscated Ciphertext Mixing},
  author = {Adida, Ben and Wikstr{\"o}m, Douglas},
  year = {2005},
  month = nov,
  abstract = {Mixnets are a type of anonymous channel composed of a handful of trustees
        that, each in turn, shuffle and rerandomize a batch ciphertexts. For
        applications that require verifiability, each trustee provides a proof of correct
        mixing. Though mixnets have recently been made quite efficient, they still
        require secret computation and proof generation after the mixing process. We
        introduce and implement Obfuscated Ciphertext Mixing, the obfuscation of a mixnet
        program. Using this technique, all proofs can be performed before the mixing
        process, even before the inputs are available. In addition, the mixing program
        does not need to be secret: anyone can publicly compute the shuffle (though not
        the decryption). We frame this functionality in the strongest obfuscation setting
        proposed by Barak et. al. [4], tweaked for the public-key setting. For
        applications where the secrecy of the shuffle permutation is particularly
        important (e.g. voting), we also consider the Distributed Obfuscation of a Mixer,
        where multiple trustees cooperate to generate an obfuscated mixer program such
        that no single trustee knows the composed shuffle permutation},
  www_section = {obfuscated ciphertext mixing},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.6592},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cryptoeprint-2005-394.pdf},
}
cryptoeprint:2010:264
@techreport{cryptoeprint:2010:264,
  title = {Cryptographic Extraction and Key Derivation: The {HKDF} Scheme},
  author = {Krawczyk, Hugo},
  institution = {Cryptology ePrint Archive},
  number = {2010/264},
  year = {2010},
  note = {\url{http://eprint.iacr.org/}},
  abstract = {In spite of the central role of key derivation functions (KDF) in applied
        cryptography, there has been little formal work addressing the design and
        analysis of general multi-purpose KDFs. In practice, most KDFs (including those
        widely standardized) follow ad-hoc approaches that treat cryptographic hash
        functions as perfectly random functions. In this paper we close some gaps between
        theory and practice by contributing to the study and engineering of KDFs in
        several ways. We provide detailed rationale for the design of KDFs based on the
        extract-then-expand approach; we present the first general and rigorous
        definition of KDFs and their security which we base on the notion of
        computational extractors; we specify a concrete fully practical KDF based on the
        HMAC construction; and we provide an analysis of this construction based on the
        extraction and pseudorandom properties of HMAC. The resultant KDF design can
        support a large variety of KDF applications under suitable assumptions on the
        underlying hash function; particular attention and effort is devoted to
        minimizing these assumptions as much as possible for each usage scenario. Beyond
        the theoretical interest in modeling KDFs, this work is intended to address two
        important and timely needs of cryptographic applications: (i) providing a single
        hash-based KDF design that can be standardized for use in multiple and diverse
        applications, and (ii) providing a conservative, yet efficient, design that
        exercises much care in the way it utilizes a cryptographic hash function. (The
        HMAC-based scheme presented here, named HKDF, is being standardized by the
        IETF.)},
  www_section = {GNUnet, HKDF, HMAC, key derivation},
  url = {http://eprint.iacr.org/2010/264},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/264.pdf},
}
cryptoeprint:2011:232
@techreport{cryptoeprint:2011:232,
  title = {Remote Timing Attacks are Still Practical},
  author = {Brumley, Billy Bob and Tuveri, Nicola},
  institution = {Cryptology ePrint Archive},
  number = {2011/232},
  year = {2011},
  month = apr,
  note = {\url{http://eprint.iacr.org/}},
  abstract = {For over two decades, timing attacks have been an active area of research
        within applied cryptography. These attacks exploit cryptosystem or protocol
        implementations that do not run in constant time. When implementing an elliptic
        curve cryptosystem with a goal to provide side-channel resistance, the scalar
        multiplication routine is a critical component. In such instances, one attractive
        method often suggested in the literature is Montgomery's ladder that performs a
        fixed sequence of curve and field operations. This paper describes a timing
        attack vulnerability in OpenSSL's ladder implementation for curves over binary
        fields. We use this vulnerability to steal the private key of a TLS server where
        the server authenticates with ECDSA signatures. Using the timing of the exchanged
        messages, the messages themselves, and the signatures, we mount a lattice attack
        that recovers the private key. Finally, we describe and implement an effective
        countermeasure},
  www_section = {elliptic curve cryptography, lattice attacks, public-key cryptography,
        side-channel attacks, timing attacks},
  url = {http://eprint.iacr.org/2011/232},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Brumley\%20\%26\%20Tuveri\%20-\%20Timing\%20Attacks.pdf},
}
cset11-experimentor
@inproceedings{cset11-experimentor,
  title = {{ExperimenTor}: A Testbed for Safe and Realistic {Tor} Experimentation},
  author = {Bauer, Kevin and Sherr, Micah and McCoy, Damon and Grunwald, Dirk},
  booktitle = {CSET'11--Proceedings of the USENIX Workshop on Cyber Security
        Experimentation and Test},
  year = {2011},
  month = aug,
  address = {San Francisco, CA, USA},
  abstract = {Tor is one of the most widely-used privacy enhancing technologies for
        achieving online anonymity and resisting censorship. Simultaneously, Tor is also
        an evolving research network on which investigators perform experiments to
        improve the network's resilience to attacks and enhance its performance. Existing
        methods for studying Tor have included analytical modeling, simulations,
        small-scale network emulations, small-scale PlanetLab deployments, and
        measurement and analysis of the live Tor network. Despite the growing body of
        work concerning Tor, there is no widely accepted methodology for conducting Tor
        research in a manner that preserves realism while protecting live users' privacy.
        In an effort to propose a standard, rigorous experimental framework for
        conducting Tor research in a way that ensures safety and realism, we present the
        design of ExperimenTor, a large-scale Tor network emulation toolkit and testbed.
        We also report our early experiences with prototype testbeds currently deployed
        at four research institutions},
  www_section = {experimentation, ExperimenTor, privacy enhancing technologies, Tor},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CSET\%2711\%20-\%20ExperimenTor.pdf},
  url = {https://bibliography.gnunet.org},
}
cset2011evans
@inproceedings{cset2011evans,
  title = {Beyond Simulation: Large-Scale Distributed Emulation of {P2P} Protocols},
  author = {Evans, Nathan S and Grothoff, Christian},
  booktitle = {4th Workshop on Cyber Security Experimentation and Test (CSET 2011)},
  organization = {USENIX Association},
  year = {2011},
  address = {San Francisco, California},
  publisher = {USENIX Association},
  abstract = {This paper presents details on the design and implementation of a scalable
        framework for evaluating peer-to-peer protocols. Unlike systems based on
        simulation, emulation-based systems enable the experimenter to obtain data that
        reflects directly on the concrete implementation in much greater detail. This
        paper argues that emulation is a better model for experiments with peer-to-peer
        protocols since it can provide scalability and high flexibility while eliminating
        the cost of moving from experimentation to deployment. We discuss our unique
        experience with large-scale emulation using the GNUnet peer-to-peer framework and
        provide experimental results to support these claims},
  www_section = {distributed hash table, emulation, GNUnet, scalability, security
        analysis},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cset2011.pdf},
  url = {https://bibliography.gnunet.org},
}
cview:pet2006
@inproceedings{cview:pet2006,
  title = {Linking Anonymous Transactions: The Consistent View Attack},
  author = {Pashalidis, Andreas and Meyer, Bernd},
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)},
  organization = {Springer},
  year = {2006},
  month = jun,
  address = {Cambridge, UK},
  pages = {384--392},
  editor = {Danezis, George and Golle, Philippe},
  publisher = {Springer},
  abstract = {In this paper we study a particular attack that may be launched by
        cooperating organisations in order to link the transactions and the pseudonyms of
        the users of an anonymous credential system. The results of our analysis are both
        positive and negative. The good (resp. bad) news, from a privacy protection
        (resp. evidence gathering) viewpoint, is that the attack may be computationally
        intensive. In particular, it requires solving a problem that is polynomial time
        equivalent to ALLSAT . The bad (resp. good) news is that a typical instance of
        this problem may be efficiently solvable},
  www_section = {privacy, pseudonym},
  isbn = {978-3-540-68790-0},
  doi = {10.1007/11957454},
  url = {http://www.springerlink.com/content/y6l6412387663581/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/cview-pet2006.pdf},
}
danezis-pet2007
@inproceedings{danezis-pet2007,
  title = {Two-Sided Statistical Disclosure Attack},
  author = {Danezis, George and Diaz, Claudia and Troncoso, Carmela},
  booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET
        2007)},
  organization = {Springer},
  year = {2007},
  month = jun,
  address = {Ottawa, Canada},
  editor = {Borisov, Nikita and Golle, Philippe},
  publisher = {Springer},
  abstract = {We introduce a new traffic analysis attack: the Two-sided Statistical
        Disclosure Attack, that tries to uncover the receivers of messages sent through
        an anonymizing network supporting anonymous replies. We provide an abstract model
        of an anonymity system with users that reply to messages. Based on this model, we
        propose a linear approximation describing the likely receivers of sent messages.
        Using simulations, we evaluate the new attack given different traffic
        characteristics and we show that it is superior to previous attacks when replies
        are routed in the system},
  www_section = {anonymity, traffic analysis},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.78.7347},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2007.pdf},
}
danezis-pet2008
@inproceedings{danezis-pet2008,
  title = {Bridging and Fingerprinting: Epistemic Attacks on Route Selection},
  author = {Danezis, George and Syverson, Paul},
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)},
  organization = {Springer},
  year = {2008},
  month = jul,
  address = {Leuven, Belgium},
  pages = {133--150},
  editor = {Borisov, Nikita and Goldberg, Ian},
  publisher = {Springer},
  abstract = {Users building routes through an anonymization network must discover the
        nodes comprising the network. Yet, it is potentially costly, or even infeasible,
        for everyone to know the entire network. We introduce a novel attack, the route
        bridging attack, which makes use of what route creators do not know of the
        network. We also present new discussion and results concerning route
        fingerprinting attacks, which make use of what route creators do know of the
        network. We prove analytic bounds for both route fingerprinting and route
        bridging and describe the impact of these attacks on published anonymity-network
        designs. We also discuss implications for network scaling and client-server vs.
        peer-to-peer systems},
  www_section = {anonymity, P2P, route bridging attack},
  isbn = {978-3-540-70629-8},
  doi = {10.1007/978-3-540-70630-4},
  url = {http://www.springerlink.com/content/q2r7g81286026576/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2008.pdf},
}
danezis:pet2003
@inproceedings{danezis:pet2003,
  title = {Mix-networks with Restricted Routes},
  author = {Danezis, George},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)},
  organization = {Springer-Verlag, LNCS 2760},
  year = {2003},
  month = mar,
  pages = {1--17},
  editor = {Dingledine, Roger},
  publisher = {Springer-Verlag, LNCS 2760},
  abstract = {We present a mix network topology that is based on sparse expander graphs,
        with each mix only communicating with a few neighbouring others. We analyse the
        anonymity such networks provide, and compare it with fully connected mix networks
        and mix cascades. We prove that such a topology is efficient since it only
        requires the route length of messages to be relatively small in comparison with
        the number of mixes to achieve maximal anonymity. Additionally mixes can resist
        intersection attacks while their batch size, that is directly linked to the
        latency of the network, remains constant. A worked example of a network is also
        presented to illustrate how these results can be applied to create secure mix
        networks in practise},
  www_section = {anonymity, mix cascades, traffic analysis},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.6.1188},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2003.pdf},
}
danezis:pet2004
@conference{danezis:pet2004,
  title = {The Traffic Analysis of Continuous-Time Mixes}, 
  author = {George Danezis}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3424}, 
  year = {2004}, 
  month = {May}, 
  pages = {35--50}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {LNCS}, 
  abstract = {We apply the information-theoretic anonymity metrics to continuous-time
        mixes, that individually delay messages instead of batching them. The anonymity
        of such mixes is measured based on their delay characteristics, and as an example
        the exponential mix (sg-mix) is analysed, simulated and shown to use the optimal
        strategy. We also describe a practical and powerful traffic analysis attack
        against connection based continuous-time mix networks, despite the presence of
        some cover traffic. Assuming a passive observer, the conditions are calculated
        that make tracing messages through the network possible}, 
  www_section = {anonymity, traffic analysis}, 
  isbn = {978-3-540-26203-9}, 
  doi = {10.1007/b136164}, 
  url = {http://www.springerlink.com/content/kgenxdaxkyey4ed2/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-pet2004.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
danezis:weis2006
@conference{danezis:weis2006,
  title = {The Economics of Mass Surveillance and the Questionable Value of Anonymous
        Communications}, 
  author = {George Danezis and Bettina Wittneben}, 
  booktitle = {Proceedings of the Fifth Workshop on the Economics of Information Security
        (WEIS 2006)}, 
  year = {2006}, 
  month = {June}, 
  address = {Cambridge, UK}, 
  editor = {Ross Anderson}, 
  abstract = {We present a model of surveillance based on social network theory, where
        observing one participant also leaks some information about third parties. We
        examine how many nodes an adversary has to observe in order to extract
        information about the network, but also how the method for choosing these nodes
        (target selection) greatly influences the resulting intelligence. Our results
        provide important insights into the actual security of anonymous communication,
        and their ability to minimise surveillance and disruption in a social network.
        They also allow us to draw interesting policy conclusions from published
        interception figures, and get a better estimate of the amount of privacy invasion
        and the actual volume of surveillance taking place}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.60.9384}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-weis2006.pdf}, 
% NOTE(review): filled the missing www_section flagged by the generator with tags
% derived from the title/abstract (surveillance economics of anonymous communication);
% confirm against the site's tag vocabulary.
  www_section = {anonymity, economics, surveillance}, 
}
danezis:wpes2003
@conference{danezis:wpes2003,
  title = {Heartbeat Traffic to Counter (n-1) Attacks}, 
  author = {George Danezis and Len Sassaman}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2003)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2003}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {A dummy traffic strategy is described that can be implemented by mix nodes in
        an anonymous communication network to detect and counter active (n--1) attacks
        and their variants. Heartbeat messages are sent anonymously from the mix node
        back to itself in order to establish its state of connectivity with the rest of
        the network. In case the mix is under attack, the flow of heartbeat messages is
        interrupted and the mix takes measures to preserve the quality of the anonymity
        it provides by introducing decoy messages}, 
  www_section = {anonymity, flooding attacks}, 
  isbn = {1-58113-776-1}, 
  doi = {10.1145/1005140.1005154}, 
  url = {http://portal.acm.org/citation.cfm?id=1005154}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-wpes2003.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
danezis:wpes2004
@conference{danezis:wpes2004,
  title = {Minx: A simple and efficient anonymous packet format}, 
  author = {George Danezis and Ben Laurie}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2004)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2004}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Minx is a cryptographic message format for encoding anonymous messages,
        relayed through a network of Chaumian mixes. It provides security against a
        passive adversary by completely hiding correspondences between input and output
        messages. Possibly corrupt mixes on the message path gain no information about
        the route length or the position of the mix on the route. Most importantly Minx
        resists active attackers that are prepared to modify messages in order to embed
        tags which they will try to detect elsewhere in the network. The proposed scheme
        imposes a low communication and computational overhead, and only combines well
        understood cryptographic primitives}, 
  www_section = {anonymity, tagging attack}, 
  isbn = {1-58113-968-3}, 
  doi = {10.1145/1029179.1029198}, 
  url = {http://portal.acm.org/citation.cfm?id=1029179.1029198}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/danezis-wpes2004.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
decmon2014
@mastersthesis{decmon2014,
  title = {A Decentralized and Autonomous Anomaly Detection Infrastructure for
        Decentralized Peer-to-Peer Networks}, 
  author = {Omar Tarabai}, 
% NOTE(review): @mastersthesis requires a school field (it was missing here); value
% taken from the sibling GNUnet thesis entry dold-thesis2014voting (same group, same
% year) -- confirm the awarding institution.
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {Master}, 
  year = {2014}, 
  month = {October}, 
  pages = {0--63}, 
  type = {Master}, 
  abstract = {In decentralized networks, collecting and analysing information from the
        network is useful for developers and operators to monitor the behaviour and
        detect anomalies such as attacks or failures in both the overlay and underlay
        networks. But realizing such an infrastructure is hard to achieve due to the
        decentralized nature of the network especially if the anomaly occurs on systems
        not operated by developers or participants get separated from the collection
        points. In this thesis a decentralized monitoring infrastructure using a
        decentralized peer-to-peer network is developed to collect information and detect
        anomalies in a collaborative way without coordination by and in absence of a
        centralized infrastructure and report detected incidents to a monitoring
        infrastructure. We start by introducing background information about peer-to-peer
        networks, anomalies and anomaly detection techniques in literature. Then we
        present some of the related work regarding monitoring decentralized networks,
        anomaly detection and data aggregation in decentralized networks. Then we perform
        an analysis of the system objectives, target environment and the desired
        properties of the system. Then we design the system in terms of the overall
        structure and its individual components. We follow with details about the system
        implementation. Lastly, we evaluate the final system implementation against our
        desired objectives}, 
  www_section = {anomaly, censorship, detection, GNUnet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/decmon_0.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
desmedt
@conference{desmedt,
  title = {How To Break a Practical MIX and Design a New One}, 
  author = {Yvo Desmedt and Kaoru Kurosawa}, 
  booktitle = {Proceedings of EUROCRYPT 2000}, 
  organization = {Springer-Verlag, LNCS 1803}, 
  year = {2000}, 
  publisher = {Springer-Verlag, LNCS 1803}, 
  abstract = {A MIX net takes a list of ciphertexts ($c_1, \ldots, c_N$) and outputs a
        permuted list of the plaintexts ($m_1, \ldots, m_N$) without revealing the
        relationship between ($c_1, \ldots, c_N$) and ($m_1, \ldots, m_N$). This paper
        first shows that the Jakobsson's MIX net of Eurocrypt'98, which was believed to
        be resilient and very efficient, is broken. We next propose an efficient
        t-resilient MIX net with $O(t^2)$ servers in which the cost of each MIX server
        is $O(N)$. Two new concepts are introduced, existential-honesty and
        limited-open-verification. They will be useful for distributed computation in
        general. A part of this research was done while the author visited the Tokyo
        Institute of Technology, March 4--19, 1999. He was then at the University of
        Wisconsin {\textemdash} Milwaukee}, 
  www_section = {existential-honesty, limited-open-verification, mix}, 
  isbn = {978-3-540-67517-4}, 
  doi = {10.1007/3-540-45539-6}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.1814}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.29.1814.pdf}, 
}
di06linyphi
@conference{di06linyphi,
  title = {Linyphi: An IPv6-Compatible Implementation of SSR}, 
  author = {Di, Pengfei and Massimiliano Marcon and Thomas Fuhrmann}, 
  booktitle = {Proceedings of the Third International Workshop on Hot Topics in
        Peer-to-Peer Systems}, 
  year = {2006}, 
  address = {Rhodes Island, Greece}, 
  type = {publication}, 
  abstract = {Scalable source routing (SSR) is a self-organizing routing protocol designed
        for supporting peer-to-peer applications. It is especially suited for networks
        that do not have a well crafted structure, e. g. ad-hoc and mesh-networks. SSR is
        based on the combination of source routes and a virtual ring structure. This ring
        is used in a Chord-like manner to obtain source routes to destinations that are
        not yet in the respective router cache. This approach makes SSR more message
        efficient than flooding based ad-hoc routing protocols. Moreover, it directly
        provides the semantics of a structured routing overlay. In this paper we present
        Linyphi, an implementation of SSR for wireless access routers. Linyphi combines
        IPv6 and SSR so that unmodified IPv6 hosts have transparent connectivity to both
        the Linyphi mesh network and the IPv4/v6 Internet. We give a basic outline of the
        implementation and demonstrate its suitability in real-world mesh network
        scenarios. Linyphi is available for download (www.linyphi.net)}, 
  www_section = {scalable source routing}, 
  isbn = {1-4244-0054-6}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di06linyphi.pdf}, 
}
di07mass
@conference{di07mass,
  title = {Application of DHT-Inspired Routing for Object Tracking}, 
  author = {Di, Pengfei and Yaser Houri and Qing Wei and J{\"o}rg Widmer and Thomas
        Fuhrmann}, 
  booktitle = {Proceedings of 4th IEEE International Conference on Mobile Ad-hoc and Sensor
        Systems}, 
  year = {2007}, 
  address = {Pisa, Italy}, 
  type = {publication}, 
  abstract = {A major problem in tracking objects in sensor networks is trading off update
        traffic and timeliness of the data that is available to a monitoring site.
        Typically, either all objects regularly update some central registry with their
        location information, or the monitoring instance floods the network with a
        request when it needs information for a particular object. More sophisticated
        approaches use a P2P-like distributed storage structure on top of geographic
        routing. The applicability of the latter is limited to certain topologies, and
        having separate storage and routing algorithms reduces efficiency. In this paper,
        we present a different solution which is based on the scalable source routing
        (SSR) protocol. SSR is a network layer routing protocol that has been inspired by
        distributed hash tables (DHT). It provides key-based routing in large networks of
        resource-limited devices such as sensor networks. We argue that this approach is
        more suitable for object tracking in sensor networks because it evenly spreads
        the updates over the whole network without being limited to a particular network
        topology. We support our argument with extensive simulations}, 
  www_section = {distributed hash table, scalable source routing}, 
  isbn = {978-1-4244-1454-3}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di07mass.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
di08iptps
@conference{di08iptps,
  title = {Providing KBR Service for Multiple Applications}, 
  author = {Di, Pengfei and Kendy Kutzner and Thomas Fuhrmann}, 
  booktitle = {The 7th International Workshop on Peer-to-Peer Systems (IPTPS '08)}, 
% NOTE(review): address was truncated as "St. Petersburg, U.S", which is ambiguous
% (Russia vs. USA); IPTPS '08 was held in St. Petersburg, Florida, USA.
  address = {St. Petersburg, FL, USA}, 
  year = {2008}, 
  type = {publication}, 
  abstract = {Key based routing (KBR) enables peer-to-peer applications to create and use
        distributed services. KBR is more flexible than distributed hash tables (DHT).
        However, the broader the application area, the more important become performance
        issues for a KBR service. In this paper, we present a novel approach to provide a
        generic KBR service. Its key idea is to use a predictable address assignment
        scheme. This scheme allows peers to calculate the overlay address of the node
        that is responsible for a given key and application ID. A public DHT service such
        as OpenDHT can then resolve this overlay address to the transport address of the
        respective peer. We compare our solution to alternative proposals such as ReDiR
        and Diminished Chord. We conclude that our solution has a better worst case
        complexity for some important KBR operations and the required state. In
        particular, unlike ReDiR, our solution can guarantee a low latency for KBR route
        operations}, 
  www_section = {distributed hash table, P2P}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/di08iptps.pdf}, 
}
diaz-pet2008
@conference{diaz-pet2008,
  title = {On the Impact of Social Network Profiling on Anonymity}, 
  author = {Claudia Diaz and Carmela Troncoso and Andrei Serjantov}, 
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)}, 
  organization = {Springer}, 
  year = {2008}, 
  month = {July}, 
  address = {Leuven, Belgium}, 
  pages = {44--62}, 
  editor = {Borisov, Nikita and Ian Goldberg}, 
  publisher = {Springer}, 
  abstract = {This paper studies anonymity in a setting where individuals who communicate
        with each other over an anonymous channel are also members of a social network.
        In this setting the social network graph is known to the attacker. We propose a
        Bayesian method to combine multiple available sources of information and obtain
        an overall measure of anonymity. We study the effects of network size and find
        that in this case anonymity degrades when the network grows. We also consider
        adversaries with incomplete or erroneous information; characterize their
        knowledge of the social network by its quantity, quality and depth; and discuss
        the implications of these properties for anonymity}, 
  www_section = {anonymity, attack}, 
  isbn = {978-3-540-70629-8}, 
  doi = {10.1007/978-3-540-70630-4_4}, 
  url = {http://portal.acm.org/citation.cfm?id=1428263}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/diaz-pet2008.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
diaz-wpes2007
@conference{diaz-wpes2007,
  title = {Does additional information always reduce anonymity?}, 
  author = {Claudia Diaz and Carmela Troncoso and George Danezis}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society 2007}, 
  organization = {ACM New York, NY, USA}, 
  year = {2007}, 
  month = {October}, 
  address = {Alexandria, VA, USA}, 
  pages = {72--75}, 
  editor = {Ting Yu}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {We discuss information-theoretic anonymity metrics, that use entropy over the
        distribution of all possible recipients to quantify anonymity. We identify a
        common misconception: the entropy of the distribution describing the
        potential receivers does not always decrease given more information. We show the
        relation of these a-posteriori distributions with the Shannon conditional
        entropy, which is an average over all possible observations}, 
  www_section = {anonymity measurement, entropy, mix, user profiles}, 
  isbn = {978-1-59593-883-1}, 
  doi = {10.1145/1314333.1314347}, 
  url = {http://portal.acm.org/citation.cfm?id=1314333.1314347}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/diaz-wpes2007.pdf}, 
}
diaz:pet2003
@conference{diaz:pet2003,
  title = {Generalising Mixes}, 
  author = {Claudia Diaz and Andrei Serjantov}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = {March}, 
  pages = {18--31}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {In this paper we present a generalised framework for expressing batching
        strategies of a mix. First, we note that existing mixes can be represented as
        functions from the number of messages in the mix to the fraction of messages to
        be flushed. We then show how to express existing mixes in the framework, and then
        suggest other mixes which arise out of that framework. We note that these cannot
        be expressed as pool mixes. In particular, we call binomial mix a timed pool mix
        that tosses coins and uses a probability function that depends on the number of
        messages inside the mix at the time of flushing. We discuss the properties of
        this mix}, 
  www_section = {mix}, 
  isbn = {978-3-540-20610-1}, 
  doi = {10.1007/b94512}, 
  url = {http://www.springerlink.com/content/jvuk0exyqxvcyhvy/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.3.9155.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
disad-free-routes
@conference{disad-free-routes,
  title = {The disadvantages of free MIX routes and how to overcome them}, 
  author = {Oliver Berthold and Andreas Pfitzmann and Ronny Standtke}, 
  booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability}, 
  organization = {Springer-Verlag, LNCS 2009}, 
  year = {2000}, 
  month = {July}, 
  pages = {30--45}, 
  publisher = {Springer-Verlag, LNCS 2009}, 
  abstract = {There are different methods to build an anonymity service using MIXes. A
        substantial decision for doing so is the method of choosing the MIX route. In
        this paper we compare two special configurations: a fixed MIX route used by all
        participants and a network of freely usable MIXes where each participant chooses
        his own route. The advantages and disadvantages in respect to the freedom of
        choice are presented and examined. We'll show that some additional attacks are
        possible in networks with freely chosen MIX routes. After describing these
        attacks, we estimate their impact on the achievable degree of anonymity. Finally,
        we evaluate the relevance of the described attacks with respect to existing
        systems like e.g. Mixmaster, Crowds, and Freedom}, 
  www_section = {anonymity, attack}, 
  isbn = {3-540-41724-9}, 
  url = {http://portal.acm.org/citation.cfm?id=371975}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/disad-free-routes.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
dold-thesis2014voting
@mastersthesis{dold-thesis2014voting,
  title = {Cryptographically Secure, Distributed Electronic Voting}, 
  author = {Florian Dold}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {B.S}, 
  year = {2014}, 
  month = {August}, 
  address = {M{\"u}nchen}, 
  pages = {0--49}, 
  type = {Bachelor's}, 
  abstract = {Elections are a vital tool for decision-making in democratic societies. The
        past decade has witnessed a handful of attempts to apply modern technology to the
        election process in order to make it faster and more cost-effective. Most of the
        practical efforts in this area have focused on replacing traditional voting
        booths with electronic terminals, but did not attempt to apply cryptographic
        techniques able to guarantee critical properties of elections such as secrecy of
        ballot and verifiability. While such techniques were extensively researched in
        the past 30 years, practical implementation of cryptographically secure remote
        electronic voting schemes are not readily available. All existing implementation
        we are aware of either exhibit critical security flaws, are proprietary black-box
        systems or require additional physical assumptions such as a preparatory key
        ceremony executed by the election officials. The latter makes such systems
        unusable for purely digital communities. This thesis describes the design and
        implementation of an electronic voting system in GNUnet, a framework for secure
        and decentralized networking. We provide a short survey of voting schemes and
        existing implementations. The voting scheme we implemented makes use of threshold
        cryptography, a technique which requires agreement among a large subset of the
        election officials to execute certain cryptographic operations. Since such
        protocols have applications outside of electronic voting, we describe their
        design and implementation in GNUnet separately}, 
  www_section = {GNUnet, secure multiparty computation, voting}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ba_dold_voting_24aug2014.pdf},
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
%%%%% NOTE(review): volume={B.S} / type={Bachelor's} is this generator's convention
%%%%% for degree level; left unchanged. No other defects found.
}
dold2016byzantine
@conference{dold2016byzantine,
  title = {Byzantine Set-Union Consensus using Efficient Set Reconciliation}, 
  author = {Dold, Florian and Grothoff, Christian}, 
  booktitle = {International Conference on Availability, Reliability and Security (ARES)}, 
  year = {2016}, 
  www_section = {Unsorted}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/dold2016byzantine.pdf}, 
%%%%% NOTE(review): www_section={Unsorted} is the site's own placeholder category;
%%%%% month/pages/doi not present in the record and not invented here.
}
duminuco:hierarchical
@article{duminuco:hierarchical,
  title = {Hierarchical codes: A flexible trade-off for erasure codes in peer-to-peer
        storage systems}, 
  author = {Alessandro Duminuco and E W Biersack}, 
  journal = {Peer-to-Peer Networking and Applications}, 
  volume = {3}, 
  year = {2010}, 
  month = {March}, 
  pages = {52--66}, 
  abstract = {Redundancy is the basic technique to provide reliability in storage systems
        consisting of multiple components. A redundancy scheme defines how the redundant
        data are produced and maintained. The simplest redundancy scheme is replication,
        which however suffers from storage inefficiency. Another approach is erasure
        coding, which provides the same level of reliability as replication using a
        significantly smaller amount of storage. When redundant data are lost, they need
        to be replaced. While replacing replicated data consists in a simple copy, it
        becomes a complex operation with erasure codes: new data are produced performing
        a coding over some other available data. The amount of data to be read and coded
        is d times larger than the amount of data produced, where d, called repair
        degree, is larger than 1 and depends on the structure of the code. This implies
        that coding has a larger computational and I/O cost, which, for distributed
        storage systems, translates into increased network traffic. Participants of
        Peer-to-Peer systems often have ample storage and CPU power, but their network
        bandwidth may be limited. For these reasons existing coding techniques are not
        suitable for P2P storage. This work explores the design space between replication
        and the existing erasure codes. We propose and evaluate a new class of erasure
        codes, called Hierarchical Codes, which allows to reduce the network traffic due
        to maintenance without losing the benefits given by traditional erasure codes}, 
  www_section = {dependability, erasure codes, peer-to-peer networking, reliability,
        storage}, 
  doi = {10.1007/s12083-009-0044-8}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Duminuco\%20\%26\%20Biersack\%20-\%20Hierarchical\%20Codes.pdf},
  url = {https://bibliography.gnunet.org}, 
%%%%% NOTE(review): author initials "E W" lack periods/full name (Ernst W. Biersack?);
%%%%% left unchanged pending confirmation. Escaped %-sequences in www_pdf_url kept as-is.
}
dwork02memorybound
@booklet{dwork02memorybound,
  title = {On memory-bound functions for fighting spam}, 
  author = {Cynthia Dwork and Andrew Goldberg and Moni Naor}, 
  year = {2002}, 
  abstract = {In 1992, Dwork and Naor proposed that e-mail messages be accompanied by
        easy-to-check proofs of computational effort in order to discourage junk e-mail,
        now known as spam. They proposed specific CPU-bound functions for this purpose.
        Burrows suggested that, since memory access speeds vary across machines much less
        than do CPU speeds, memory-bound functions may behave more equitably than
        CPU-bound functions; this approach was first explored by Abadi, Burrows, Manasse,
        and Wobber [5]. We further investigate this intriguing proposal. Specifically, we
        1) Provide a formal model of computation and a statement of the problem; 2)
        Provide an abstract function and prove an asymptotically tight amortized lower
        bound on the number of memory accesses required to compute an acceptable proof of
        effort; specifically, we prove that, on average, the sender of a message must
        perform many unrelated accesses to memory, while the receiver, in order to verify
        the work, has to perform significantly fewer accesses; 3) Propose a concrete
        instantiation of our abstract function, inspired by the RC4 stream cipher; 4)
        Describe techniques to permit the receiver to verify the computation with no
        memory accesses; 5) Give experimental results showing that our concrete
        memory-bound function is only about four times slower on a 233 MHz settop box
        than on a 3.06 GHz workstation, and that speedup of the function is limited even
        if an adversary knows the access sequence and uses optimal off-line cache
        replacement}, 
  doi = {10.1007/b11817}, 
  url = {http://citeseer.ist.psu.edu/dwork02memorybound.html}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/memory-bound-crypto.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
e2e-traffic
@conference{e2e-traffic,
  title = {Practical Traffic Analysis: Extending and Resisting Statistical Disclosure}, 
  author = {Nick Mathewson and Roger Dingledine}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3424}, 
  year = {2004}, 
  month = {May}, 
  pages = {17--34}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {LNCS}, 
  abstract = {We extend earlier research on mounting and resisting passive long-term
        end-to-end traffic analysis attacks against anonymous message systems, by
        describing how an eavesdropper can learn sender-receiver connections even when
        the substrate is a network of pool mixes, the attacker is non-global, and senders
        have complex behavior or generate padding messages. Additionally, we describe how
        an attacker can use information about message distinguishability to speed the
        attack. We simulate our attacks for a variety of scenarios, focusing on the
        amount of information needed to link senders to their recipients. In each
        scenario, we show that the intersection attack is slowed but still succeeds
        against a steady-state mix network. We find that the attack takes an impractical
        amount of time when message delivery times are highly variable; when the attacker
        can observe very little of the network; and when users pad consistently and the
        adversary does not know how the network behaves in their absence}, 
  www_section = {traffic analysis}, 
  isbn = {978-3-540-26203-9}, 
  doi = {10.1007/b136164}, 
  url = {http://www.springerlink.com/content/v6m6cat1lxvbd4yd/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/e2e-traffic.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
ebe2003
@article{ebe2003,
  title = {An Excess-Based Economic Model for Resource Allocation in Peer-to-Peer
        Networks}, 
  author = {Christian Grothoff}, 
  journal = {Wirtschaftsinformatik}, 
  volume = {3-2003}, 
  year = {2003}, 
  month = {June}, 
  publisher = {Vieweg-Verlag}, 
  abstract = {This paper describes economic aspects of GNUnet, a peer-to-peer framework for
        anonymous distributed file-sharing. GNUnet is decentralized; all nodes are equal
        peers. In particular, there are no trusted entities in the network. This paper
        describes an economic model to perform resource allocation and defend against
        malicious participants in this context. The approach presented does not use
        credentials or payments; rather, it is based on trust. The design is much like
        that of a cooperative game in which peers take the role of players. Nodes must
        cooperate to achieve individual goals. In such a scenario, it is important to be
        able to distinguish between nodes exhibiting friendly behavior and those
        exhibiting malicious behavior. GNUnet aims to provide anonymity for its users.
        Its design makes it hard to link a transaction to the node where it originated
        from. While anonymity requirements make a global view of the end-points of a
        transaction infeasible, the local link-to-link messages can be fully
        authenticated. Our economic model is based entirely on this local view of the
        network and takes only local decisions}, 
  www_section = {anonymity, file-sharing, GNUnet}, 
  url = {http://grothoff.org/christian/ebe.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ebe.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
esed
@conference{esed,
  title = {Efficient Sharing of Encrypted Data}, 
  author = {Krista Bennett and Christian Grothoff and Tzvetan Horozov and Ioana Patrascu}, 
% NOTE(review): booktitle said "ACSIP 2002" -- typo for ACISP 2002 (7th Australasian
% Conference on Information Security and Privacy, Melbourne), consistent with the
% address field of this entry.
  booktitle = {Proceedings of ACISP 2002}, 
  organization = {Springer-Verlag}, 
  year = {2002}, 
  address = {Melbourne, Australia}, 
  pages = {107--120}, 
  publisher = {Springer-Verlag}, 
  www_section = {censorship resistance, ECRS, encoding, file-sharing, GNUnet}, 
  url = {http://grothoff.org/christian/esed.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/esed.pdf}, 
}
esorics04-mauw
@conference{esorics04-mauw,
  title = {A formalization of anonymity and onion routing}, 
  author = {Sjouke Mauw and Jan Verschuren and Erik P. de Vink}, 
  booktitle = {Proceedings of ESORICS 2004}, 
  organization = {LNCS 3193}, 
  year = {2004}, 
  address = {Sophia Antipolis}, 
  pages = {109--124}, 
  publisher = {LNCS 3193}, 
  abstract = {The use of formal methods to verify security protocols with respect to
        secrecy and authentication has become standard practice. In contrast, the
        formalization of other security goals, such as privacy, has received less
        attention. Due to the increasing importance of privacy in the current society,
        formal methods will also become indispensable in this area. Therefore, we propose
        a formal definition of the notion of anonymity in presence of an observing
        intruder. We validate this definition by analyzing a well-known anonymity
        preserving protocol, viz. onion routing}, 
  www_section = {anonymity, onion routing, privacy}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.75.2547}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/esorics04-mauw.pdf}, 
%%%%% NOTE(review): fields and brace balance checked; no defects found, entry unchanged.
}
esorics05-Klonowski
@conference{esorics05-Klonowski,
  author       = {Marcin Gogolewski and Marek Klonowski and Miroslaw Kutylowski},
  title        = {Local View Attack on Anonymous Communication},
  booktitle    = {Proceedings of ESORICS 2005},
  organization = {Springer Berlin / Heidelberg},
  publisher    = {Springer Berlin / Heidelberg},
  year         = {2005},
  month        = {September},
  abstract     = {We consider anonymous communication protocols based on onions: each message
        is sent in an encrypted form through a path chosen at random by its sender, and
        the message is re-coded by each server on the path. Recently, it has been shown
        that if the anonymous paths are long enough, then the protocols provide provable
        security for some adversary models. However, it was assumed that all users choose
        intermediate servers uniformly at random from the same set of servers. We show
        that if a single user chooses only from a constrained subset of possible
        intermediate servers, anonymity level may dramatically decrease. A thumb rule is
        that if Alice is aware of much less than 50\% of possible intermediate servers,
        then the anonymity set for her message becomes surprisingly small with high
        probability. Moreover, for each location in the anonymity set an adversary may
        compute probability that it gets a message of Alice. Since there are big
        differences in these probabilities, in most cases the true destination of the
        message from Alice is in a small group of locations with the highest
        probabilities. Our results contradict some beliefs that the protocols mentioned
        guarantee anonymity provided that the set of possible intermediate servers for
        each user is large},
  www_section  = {anonymity measurement, onion routing},
  isbn         = {978-3-540-28963-0},
  doi          = {10.1007/11555827},
  url          = {http://www.springerlink.com/content/ewblt5k80xrgqe4j/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/esorics05-Klonowski.pdf},
}
evans2009tor
@conference{evans2009tor,
  author       = {Nathan S Evans and Roger Dingledine and Christian Grothoff},
  title        = {A Practical Congestion Attack on Tor Using Long Paths},
  booktitle    = {18th USENIX Security Symposium},
  organization = {USENIX},
  publisher    = {USENIX},
  year         = {2009},
  pages        = {33--50},
  abstract     = {In 2005, Murdoch and Danezis demonstrated the first practical congestion
        attack against a deployed anonymity network. They could identify which relays
        were on a target Tor user's path by building paths one at a time through every
        Tor relay and introducing congestion. However, the original attack was performed
        on only 13 Tor relays on the nascent and lightly loaded Tor network. We show that
        the attack from their paper is no longer practical on today's 1500-relay heavily
        loaded Tor network. The attack doesn't scale because a) the attacker needs a
        tremendous amount of bandwidth to measure enough relays during the attack window,
        and b) there are too many false positives now that many other users are adding
        congestion at the same time as the attacks. We then strengthen the original
        congestion attack by combining it with a novel bandwidth amplification attack
        based on a flaw in the Tor design that lets us build long circuits that loop back
        on themselves. We show that this new combination attack is practical and
        effective by demonstrating a working attack on today's deployed Tor network. By
        coming up with a model to better understand Tor's routing behavior under
        congestion, we further provide a statistical analysis characterizing how
        effective our attack is in each case},
  www_section  = {anonymity, attack, denial-of-service, installation, Tor},
  url          = {http://grothoff.org/christian/tor.pdf},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/tor.pdf},
}
fairbrother:pet2004
@conference{fairbrother:pet2004,
  author       = {Peter Fairbrother},
  title        = {An Improved Construction for Universal Re-encryption},
  booktitle    = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  organization = {Springer Berlin / Heidelberg},
  publisher    = {Springer Berlin / Heidelberg},
  series       = {LNCS},
  volume       = {3424},
  year         = {2004},
  month        = {May},
  pages        = {79--87},
  abstract     = {Golle et al recently introduced universal re-encryption, defining it as
        re-encryption by a player who does not know the key used for the original
        encryption, but which still allows an intended player to recover the plaintext.
        Universal re-encryption is potentially useful as part of many information-hiding
        techniques, as it allows any player to make ciphertext unidentifiable without
        knowing the key used. Golle et al's techniques for universal re-encryption are
        reviewed, and a hybrid universal re-encryption construction with improved work
        and space requirements which also permits indefinite re-encryptions is presented.
        Some implementational issues and optimisations are discussed},
  www_section  = {information hiding, re-encryption},
  isbn         = {978-3-540-26203-9},
  doi          = {10.1007/b136164},
  url          = {http://www.springerlink.com/content/q07439n27u1egx0w/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/fairbrother-pet2004.pdf},
}
feamster:pet2003
@conference{feamster:pet2003,
  title = {Thwarting Web Censorship with Untrusted Messenger Discovery}, 
  author = {Nick Feamster and Magdalena Balazinska and Winston Wang and Hari Balakrishnan
        and David Karger}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = {March}, 
  pages = {125--140}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {All existing anti-censorship systems for the Web rely on proxies to grant
        clients access to censored information. Therefore, they face the proxy discovery
        problem: how can clients discover the proxies without having the censor discover
        and block these proxies? To avoid widespread discovery and blocking, proxies must
        not be widely published and should be discovered in-band. In this paper, we
        present a proxy discovery mechanism called keyspace hopping that meets this goal.
        Similar in spirit to frequency hopping in wireless networks, keyspace hopping
        ensures that each client discovers only a small fraction of the total number of
        proxies. However, requiring clients to independently discover proxies from a large
        set makes it practically impossible to verify the trustworthiness of every proxy
        and creates the possibility of having untrusted proxies. To address this, we
        propose separating the proxy into two distinct components---the messenger, which
        the client discovers using keyspace hopping and which simply acts as a gateway to
        the Internet; and the portal, whose identity is widely-published and whose
        responsibility it is to interpret and serve the client's requests for censored
        content. We show how this separation, as well as in-band proxy discovery, can be
        applied to a variety of anti-censorship systems}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/feamster-pet2003.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
feamster:wpes2004
@conference{feamster:wpes2004,
  author       = {Nick Feamster and Roger Dingledine},
  title        = {Location Diversity in Anonymity Networks},
  booktitle    = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2004)},
  year         = {2004},
  month        = {October},
  address      = {Washington, DC, USA},
  abstract     = {Anonymity networks have long relied on diversity of node location for
        protection against attacks---typically an adversary who can observe a larger
        fraction of the network can launch a more effective attack. We investigate the
        diversity of two deployed anonymity networks, Mixmaster and Tor, with respect to
        an adversary who controls a single Internet administrative domain. Specifically,
        we implement a variant of a recently proposed technique that passively estimates
        the set of administrative domains (also known as autonomous systems, or ASes)
        between two arbitrary end-hosts without having access to either end of the path.
        Using this technique, we analyze the AS-level paths that are likely to be used in
        these anonymity networks. We find several cases in each network where multiple
        nodes are in the same administrative domain. Further, many paths between nodes,
        and between nodes and popular endpoints, traverse the same domain},
  www_section  = {anonymity, autonomous systems},
  doi          = {10.1145/1029179.1029199},
  url          = {http://portal.acm.org/citation.cfm?id=1029199},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.6119.pdf},
}
feldman:hidden-action
@article{feldman:hidden-action,
  author       = {Michal Feldman and John Chuang and Ion Stoica and S Shenker},
  title        = {Hidden-Action in Network Routing},
  journal      = {IEEE Journal on Selected Areas in Communications},
  volume       = {25},
  year         = {2007},
  month        = {August},
  pages        = {1161--1172},
  abstract     = {In communication networks, such as the Internet or mobile ad-hoc networks,
        the actions taken by intermediate nodes or links are typically hidden from the
        communicating endpoints; all the endpoints can observe is whether or not the
        end-to-end transmission was successful. Therefore, in the absence of incentives
        to the contrary, rational (i.e., selfish) intermediaries may choose to forward
        messages at a low priority or simply not forward messages at all. Using a
        principal-agent model, we show how the hidden-action problem can be overcome
        through appropriate design of contracts in both the direct (the endpoints
        contract with each individual router directly) and the recursive (each router
        contracts with the next downstream router) cases. We further show that, depending
        on the network topology, per-hop or per-path monitoring may not necessarily
        improve the utility of the principal or the social welfare of the system},
  www_section  = {action, communication network, hidden action, network routing},
  issn         = {0733-8716},
  doi          = {10.1109/JSAC.2007.070810},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Journal\%20\%2825\%29\%20-\%20Hidden-action\%20in\%20network\%20routing.pdf},
}
fessi-iptcomm2007
@conference{fessi-iptcomm2007,
  title = {A cooperative SIP infrastructure for highly reliable telecommunication
        services}, 
  author = {Fessi, Ali and Heiko Niedermayer and Kinkelin, Holger and Carle, Georg}, 
  booktitle = {IPTComm '07: Proceedings of the 1st international conference on Principles,
        systems and applications of IP telecommunications}, 
  organization = {ACM}, 
  year = {2007}, 
  address = {New York, NY, USA}, 
  pages = {29--38}, 
  publisher = {ACM}, 
  isbn = {978-1-60558-006-7}, 
  doi = {10.1145/1326304.1326310}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
fiveyearslater
@conference{fiveyearslater,
  author       = {Ian Goldberg},
  title        = {Privacy-enhancing technologies for the Internet, II: Five years later},
  booktitle    = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)},
  organization = {Springer-Verlag, LNCS 2482},
  publisher    = {Springer-Verlag, LNCS 2482},
  editor       = {Roger Dingledine and Paul Syverson},
  year         = {2002},
  month        = {April},
  abstract     = {Five years ago, {\textquotedblleft}Privacy-enhancing technologies for the
        Internet{\textquotedblright} [23] examined the state of the then newly emerging
        privacy-enhancing technologies. In this survey paper, we look back at the last
        five years to see what has changed, what has stagnated, what has succeeded, what
        has failed, and why. We also look at current trends with a view towards the
        future},
  www_section  = {privacy},
  isbn         = {978-3-540-00565-0},
  doi          = {10.1007/3-540-36467-6},
  url          = {http://www.springerlink.com/content/740p21gl5a9f640m/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/petfive.pdf},
}
fk-2016-1-p46
@article{fk-2016-1-p46,
  title = {Zur Idee herrschaftsfreier kooperativer Internetdienste}, 
  author = {Christian Ricardo K{\"u}hne}, 
  journal = {FIfF-Kommunikation}, 
  year = {2016}, 
  pages = {46}, 
  www_section = {Architecture, GNUnet, Internet}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fk-2016-1-p46.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
flow-correlation04
@conference{flow-correlation04,
  author       = {Ye Zhu and Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao},
  title        = {On Flow Correlation Attacks and Countermeasures in Mix Networks},
  booktitle    = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)},
  organization = {Springer Berlin / Heidelberg},
  publisher    = {Springer Berlin / Heidelberg},
  series       = {LNCS},
  volume       = {3424},
  year         = {2004},
  month        = {May},
  pages        = {207--225},
  abstract     = {In this paper, we address issues related to flow correlation attacks and the
        corresponding countermeasures in mix networks. Mixes have been used in many
        anonymous communication systems and are supposed to provide countermeasures that
        can defeat various traffic analysis attacks. In this paper, we focus on a
        particular class of traffic analysis attack, flow correlation attacks, by which
        an adversary attempts to analyze the network traffic and correlate the traffic of
        a flow over an input link at a mix with that over an output link of the same mix.
        Two classes of correlation methods are considered, namely time-domain methods and
        frequency-domain methods. Based on our threat model and known strategies in
        existing mix networks, we perform extensive experiments to analyze the
        performance of mixes. We find that a mix with any known batching strategy may
        fail against flow correlation attacks in the sense that for a given flow over an
        input link, the adversary can correctly determine which output link is used by
        the same flow. We also investigated methods that can effectively counter the flow
        correlation attack and other timing attacks. The empirical results provided in
        this paper give an indication to designers of Mix networks about appropriate
        configurations and alternative mechanisms to be used to counter flow correlation
        attacks. This work was supported in part by the National Science Foundation under
        Contracts 0081761 and 0324988, by the Defense Advanced Research Projects Agency
        under Contract F30602-99-1-0531, and by Texas A\&M University under its
        Telecommunication and Information Task Force Program. Any opinions, findings, and
        conclusions or recommendations in this material, either expressed or implied, are
        those of the authors and do not necessarily reflect the views of the sponsors
        listed above},
  www_section  = {flow correlation attack},
  isbn         = {978-3-540-26203-9},
  doi          = {10.1007/b136164},
  url          = {http://www.springerlink.com/content/kej7uwxee8h71p81/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/flow-correlation04.pdf},
}
foci11-decoy
@conference{foci11-decoy,
  author       = {Josh Karlin and Daniel Ellard and Alden W. Jackson and Christine E. Jones and
        Greg Lauer and David P. Mankins and W. Timothy Strayer},
  title        = {Decoy Routing: Toward Unblockable Internet Communication},
  booktitle    = {FOCI'11--Proceedings of the USENIX Workshop on Free and Open Communications
        on the Internet},
  year         = {2011},
  month        = {August},
  address      = {San Francisco, CA, USA},
  abstract     = {We present decoy routing, a mechanism capable of circumventing common network
        filtering strategies. Unlike other circumvention techniques, decoy routing does
        not require a client to connect to a specific IP address (which is easily
        blocked) in order to provide circumvention. We show that if it is possible for a
        client to connect to any unblocked host/service, then decoy routing could be used
        to connect them to a blocked destination without cooperation from the host. This
        is accomplished by placing the circumvention service in the network itself --
        where a single device could proxy traffic between a significant fraction of hosts
        -- instead of at the edge},
  www_section  = {decoy routing, Internet communication, network filter},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/FOCI\%2711\%20-\%20Decoy\%20Routing\%3A\%20Toward\%20Unblockable\%20Internet\%20Communication.pdf},
  url          = {https://bibliography.gnunet.org},
}
forward_secure_encryption2014
@conference{forward_secure_encryption2014,
  title = {Forward-Secure Distributed Encryption}, 
  author = {Lueks, Wouter and Hoepman, Jaap-Henk and Kursawe, Klaus}, 
  booktitle = {Privacy Enhancing Technologies}, 
  organization = {Springer International Publishing}, 
  volume = {8555}, 
  year = {2014}, 
  pages = {123--142}, 
  editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}, 
  publisher = {Springer International Publishing}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Distributed encryption is a cryptographic primitive that implements revocable
        privacy. The primitive allows a recipient of a message to decrypt it only if
        enough senders encrypted that same message. We present a new distributed
        encryption scheme that is simpler than the previous solution by Hoepman and
        Galindo{\textemdash}in particular it does not rely on pairings{\textemdash}and
        that satisfies stronger security requirements. Moreover, we show how to achieve
        key evolution, which is necessary to ensure scalability in many practical
        applications, and prove that the resulting scheme is forward secure. Finally, we
        present a provably secure batched distributed encryption scheme that is much more
        efficient for small plaintext domains, but that requires more storage}, 
  isbn = {978-3-319-08505-0}, 
  doi = {10.1007/978-3-319-08506-7_7}, 
  url = {http://dx.doi.org/10.1007/978-3-319-08506-7_7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/foward_secure_encryption.pdf},
  www_section = {Unsorted}, 
}
fps2013wachs
@conference{fps2013wachs,
  title = {On the Feasibility of a Censorship Resistant Decentralized Name System}, 
  author = {Matthias Wachs and Martin Schanzenbach and Christian Grothoff}, 
  booktitle = {6th International Symposium on Foundations \& Practice of Security (FPS
        2013)}, 
  organization = {Springer Verlag}, 
  year = {2013}, 
  month = {October}, 
  address = {La Rochelle, France}, 
  publisher = {Springer Verlag}, 
  abstract = {A central problem on the Internet today is that key infrastructure for
        security is concentrated in a few places. This is particularly true in the areas
        of naming and public key infrastructure. Secret services and other government
        organizations can use this fact to block access to information or monitor
        communications. One of the most popular and easy to perform techniques is to make
        information on the Web inaccessible by censoring or manipulating the Domain Name
        System (DNS). With the introduction of DNSSEC, the DNS is furthermore posed to
        become an alternative PKI to the failing X.509 CA system, further cementing the
        power of those in charge of operating DNS. This paper maps the design space and
        gives design requirements for censorship resistant name systems. We survey the
        existing range of ideas for the realization of such a system and discuss the
        challenges these systems have to overcome in practice. Finally, we present the
        results from a survey on browser usage, which supports the idea that delegation
        should be a key ingredient in any censorship resistant name system}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fps2013wachs.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
freedom2-arch
@booklet{freedom2-arch,
  author       = {Philippe Boucher and Adam Shostack and Ian Goldberg},
  title        = {Freedom Systems 2.0 Architecture},
  publisher    = {Zero Knowledge Systems, {Inc.}},
  type         = {White Paper},
  year         = {2000},
  month        = {December},
  abstract     = {This white paper, targeted at the technically savvy reader, offers a detailed
        look at the Freedom 2.0 System architecture. It is intended to give the reader a
        good understanding of the components that make up this system and the
        relationships between them, as well as to encourage analysis of the system},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/freedom2-arch.pdf},
  url          = {https://bibliography.gnunet.org},
%%%%% ERROR: Missing field
% www_section = {?????},
}
freedom21-security
@booklet{freedom21-security,
  author       = {Adam Back and Ian Goldberg and Adam Shostack},
  title        = {Freedom Systems 2.1 Security Issues and Analysis},
  publisher    = {Zero Knowledge Systems, {Inc.}},
  type         = {White Paper},
  year         = {2001},
  month        = {May},
  abstract     = {We describe attacks to which Freedom, or Freedom users, may be vulnerable.
        These attacks are those that reduce the privacy of a Freedom user, through
        exploiting cryptographic, design or implementation issues. We include issues
        which may not be Freedom security issues which arise when the system is not
        properly used. This disclosure includes all known design or implementation flaws,
        as well as places where various trade-offs made while creating the system have
        privacy implications. We also discuss cryptographic points that are needed for a
        complete understanding of how Freedom works, including ones we don't believe can
        be used to reduce anyone's privacy},
  www_section  = {Freedom, privacy},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/Freedom_Security2-1.pdf},
}
freehaven-berk
@conference{freehaven-berk,
  author       = {Roger Dingledine and Michael J. Freedman and David Molnar},
  title        = {The Free Haven Project: Distributed Anonymous Storage Service},
  booktitle    = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability},
  organization = {Springer-Verlag, LNCS 2009},
  publisher    = {Springer-Verlag, LNCS 2009},
  year         = {2000},
  month        = {July},
  abstract     = {We present a design for a system of anonymous storage which resists the
        attempts of powerful adversaries to find or destroy any stored data. We enumerate
        distinct notions of anonymity for each party in the system, and suggest a way to
        classify anonymous systems based on the kinds of anonymity provided. Our design
        ensures the availability of each document for a publisher-specified lifetime. A
        reputation system provides server accountability by limiting the damage caused
        from misbehaving servers. We identify attacks and defenses against anonymous
        storage services, and close with a list of problems which are currently
        unsolved},
  www_section  = {accountability, anonymity, anonymous publishing},
  isbn         = {978-3-540-41724-8},
  doi          = {10.1007/3-540-44702-4},
  url          = {http://www.springerlink.com/content/uh3mbw5m6u6xt24v/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/freehaven_pres.pdf},
}
freenet
@conference{freenet,
  title = {Freenet: A Distributed Anonymous Information Storage and Retrieval System}, 
  author = {Ian Clarke and Sandberg, Oskar and Brandon Wiley and Theodore W. Hong}, 
  booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability}, 
  year = {2000}, 
  month = {July}, 
  pages = {46--66}, 
  abstract = {We describe Freenet, an adaptive peer-to-peer network application that
        permits the publication, replication, and retrieval of data while protecting the
        anonymity of both authors and readers. Freenet operates as a network of identical
        nodes that collectively pool their storage space to store data files and
        cooperate to route requests to the most likely physical location of data. No
        broadcast search or centralized location index is employed. Files are referred to
        in a location-independent manner, and are dynamically replicated in locations
        near requestors and deleted from locations where there is no interest. It is
        infeasible to discover the true origin or destination of a file passing through
        the network, and difficult for a node operator to determine or be held
        responsible for the actual physical contents of her own node}, 
  www_section = {anonymity, Freenet, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.10.4919}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.10.4919_0.pdf}, 
}
fu-active
@conference{fu-active,
  title = {Active Traffic Analysis Attacks and Countermeasures}, 
  author = {Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao}, 
  booktitle = {Proceedings of the 2003 International Conference on Computer Networks and
        Mobile Computing}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2003}, 
  month = {January}, 
  pages = {31--39}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {To explore mission-critical information, an adversary using active traffic
        analysis attacks injects probing traffic into the victim network and analyzes the
        status of underlying payload traffic. Active traffic analysis attacks are easy to
        deploy and hence become a serious threat to mission critical applications. This
        paper suggests statistical pattern recognition as a fundamental technology to
        evaluate effectiveness of active traffic analysis attacks and corresponding
        countermeasures. Our evaluation shows that sample entropy of ping packets' round
        trip time is an effective feature statistic to discover the payload traffic rate.
        We propose simple countermeasures that can significantly reduce the effectiveness
        of ping-based active traffic analysis attacks. Our experiments validate the
        effectiveness of this scheme, which can also be used in other scenarios}, 
  www_section = {traffic analysis}, 
  isbn = {0-7695-2033-2}, 
  url = {http://portal.acm.org/citation.cfm?id=950964}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fu-active.pdf}, 
}
fu-analytical
@conference{fu-analytical,
  author       = {Xinwen Fu and Bryan Graham and Riccardo Bettati and Wei Zhao},
  title        = {Analytical and Empirical Analysis of Countermeasures to Traffic Analysis
        Attacks},
  booktitle    = {Proceedings of the 2003 International Conference on Parallel Processing},
  year         = {2003},
  pages        = {483--492},
  abstract     = {This paper studies countermeasures to traffic analysis attacks. A common
        strategy for such countermeasures is link padding. We consider systems where
        payload traffic is padded so that packets have either constant inter-arrival
        times or variable inter-arrival times. The adversary applies statistical
        recognition techniques to detect the payload traffic rates by using statistical
        measures like sample mean, sample variance, or sample entropy. We evaluate
        quantitatively the ability of the adversary to make a correct detection and
        derive closed-form formulas for the detection rate based on analytical models.
        Extensive experiments were carried out to validate the system performance
        predicted by the analytical method. Based on the systematic evaluations, we
        develop design guidelines for the proper configuration of a system in order to
        minimize the detection rate},
  www_section  = {traffic analysis},
  isbn         = {0-7695-2017-0},
  doi          = {10.1109/ICPP.2003.1240613},
  url          = {http://www.computer.org/portal/web/csdl/doi?doc=doi/10.1109/ICPP.2003.1240613},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/fu-analytical.pdf},
}
fuhrmann05emnets
@conference{fuhrmann05emnets,
  author       = {Thomas Fuhrmann},
  title        = {The Use of Scalable Source Routing for Networked Sensors},
  booktitle    = {Proceedings of the 2nd IEEE Workshop on Embedded Networked Sensors},
  organization = {IEEE Computer Society Washington, DC, USA},
  publisher    = {IEEE Computer Society Washington, DC, USA},
  type         = {publication},
  year         = {2005},
  address      = {Sydney, Australia},
  pages        = {163--165},
  abstract     = {In this paper, we briefly present a novel routing algorithm, scalable source
        routing (SSR), which is capable of memory and message efficient routing in
        networks with 'random topology'. This algorithm enables sensor networks to use
        recent peer to-peer mechanisms from the field of overlay networks, like e.g.
        distributed hash tables and indirection infrastructures. Unlike other proposals
        along that direction, SSR integrates all necessary routing tasks into one simple,
        highly efficient routing protocol. Simulations demonstrate that in a small-world
        network with more than 100 000 nodes, SSR requires each node to only store
        routing data for 255 other nodes to establish routes between arbitrary pairs of
        nodes. These routes are on average only about 20-30\% longer than the globally
        optimal path between these nodes},
  www_section  = {scalable source routing, topology matching},
  isbn         = {0-7803-9246-9},
  url          = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann05emnets.pdf},
}
fuhrmann05networking
@conference{fuhrmann05networking,
  title = {A Self-Organizing Routing Scheme for Random Networks}, 
  author = {Thomas Fuhrmann}, 
  booktitle = {Proceedings of the 4th IFIP-TC6 Networking Conference}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  address = {Waterloo, Canada}, 
  pages = {1366--1370}, 
  publisher = {Springer Berlin / Heidelberg}, 
  type = {publication}, 
  abstract = {Most routing protocols employ address aggregation to achieve scalability with
        respect to routing table size. But often, as networks grow in size and
        complexity, address aggregation fails. Other networks, e.g. sensor-actuator
        networks or ad-hoc networks, that are characterized by organic growth might not
        at all follow the classical hierarchical structures that are required for
        aggregation. In this paper, we present a fully self-organizing routing scheme
        that is able to efficiently route messages in random networks with randomly
        assigned node addresses. The protocol combines peer-to-peer techniques with
        source routing and can be implemented to work with very limited resource demands.
        With the help of simulations we show that it nevertheless quickly converges into
        a globally consistent state and achieves a routing stretch of only 1.2 -- 1.3 in
        a network with more than $10^5$ randomly assigned nodes}, 
  www_section = {ad-hoc networks, P2P, self-organization}, 
  isbn = {978-3-540-25809-4}, 
  doi = {10.1007/b136094}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann05networking.pdf},
}
fuhrmann06pushing-tr
@booklet{fuhrmann06pushing-tr,
  title = {Pushing Chord into the Underlay: Scalable Routing for Hybrid MANETs}, 
  author = {Thomas Fuhrmann and Di, Pengfei and Kendy Kutzner and Cramer, Curt}, 
  number = {2006-12}, 
  year = {2006}, 
  publisher = {Fakult{\"a}t f{\"u}r Informatik, Universit{\"a}t Karlsruhe}, 
  type = {Interner Bericht}, 
  abstract = {SCALABLE SOURCE ROUTING is a novel routing approach for large unstructured
        networks, for example hybrid mobile ad hoc networks (MANETs), mesh networks, or
        sensor-actuator networks. It is especially suited for organically growing
        networks of many resource-limited mobile devices supported by a few fixed-wired
        nodes. SCALABLE SOURCE ROUTING is a full-fledged routing protocol that directly
        provides the semantics of a structured peer-to-peer overlay. Hence, it can serve
        as an efficient basis for fully decentralized applications on mobile devices.
        SCALABLE SOURCE ROUTING combines source routing in the physical network with
        Chord-like routing in the virtual ring formed by the address space. Message
        forwarding greedily decreases the distance in the virtual ring while preferring
        physically short paths. Unlike previous approaches, scalability is achieved
        without imposing artificial hierarchies or assigning location-dependent
        addresses. SCALABLE SOURCE ROUTING enables any-to-any communication in a flat
        address space without maintaining any-to-any routes. Each node proactively
        discovers its virtual vicinity using an iterative process. Additionally, it
        passively caches a limited amount of additional paths. By means of extensive
        simulation, we show that SCALABLE SOURCE ROUTING is resource-efficient and
        scalable well beyond 10,000 nodes}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann06pushing.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
fuhrmann07wons
@conference{fuhrmann07wons,
  title = {Performance of Scalable Source Routing in Hybrid MANETs}, 
  author = {Thomas Fuhrmann}, 
  booktitle = {Proceedings of the Fourth Annual Conference on Wireless On demand Network
        Systems and Services}, 
  year = {2007}, 
  address = {Obergurgl, Austria}, 
  pages = {122--129}, 
  type = {publication}, 
  abstract = {Scalable source routing (SSR) is a novel routing approach for large
        unstructured networks such as mobile ad hoc networks, mesh networks, or
        sensor-actuator networks. It is especially suited for organically growing
        networks of many resource-limited mobile devices supported by a few fixed-wired
        nodes. SSR is a full-fledged network layer routing protocol that directly
        provides the semantics of a structured peer-to-peer network. Hence, it can serve
        as an efficient basis for fully decentralized applications on mobile devices. SSR
        combines source routing in the physical network with Chord-like routing in the
        virtual ring formed by the address space. Message forwarding greedily decreases
        the distance in the virtual ring while preferring physically short paths.
        Thereby, scalability is achieved without imposing artificial hierarchies or
        assigning location-dependent addresses}, 
  www_section = {mobile Ad-hoc networks, P2P, scalable source routing}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/fuhrmann07wons.pdf}, 
}
fuhrmann08comparable
@booklet{fuhrmann08comparable,
  title = {Towards Comparable Network Simulations}, 
  author = {Di, Pengfei and Yaser Houri and Kendy Kutzner and Thomas Fuhrmann}, 
  number = {2008-9}, 
  year = {2008}, 
  month = {August}, 
  publisher = {Dept. of Computer Science, Universit{\"a}t Karlsruhe (TH)}, 
  type = {Interner Bericht}, 
  abstract = {Simulations have been a valuable and much used tool in networking research
        for decades. New protocols are evaluated by simulations. Often, competing designs
        are judged by their respective performance in simulations. Despite this great
        importance the state-of-the-art in network simulations is nevertheless still low.
        A recent survey showed that most publications in a top conference did not even
        give enough details to repeat the simulations. In this paper we go beyond
        repeatability and ask: Are different simulations comparable? We study various
        implementations of the IEEE 802.11 media access layer in ns-2 and OMNeT++ and
        report some dramatic differences. These findings indicate that two protocols
        cannot be compared meaningfully unless they are compared in the very same
        simulation environment. We claim that this problem limits the value of the
        respective publications because readers are forced to re-implement the work that
        is described in the paper rather than building on its results. Facing the
        additional problem that not all authors will agree on one simulator, we address
        ways of making different simulators comparable}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/towards_comparable_network_simulations.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
gap
@conference{gap,
  title = {gap--Practical Anonymous Networking}, 
  author = {Krista Bennett and Christian Grothoff}, 
  booktitle = {Designing Privacy Enhancing Technologies}, 
  organization = {Springer-Verlag}, 
  year = {2003}, 
  pages = {141--160}, 
  publisher = {Springer-Verlag}, 
  abstract = {This paper describes how anonymity is achieved in GNUnet, a framework for
        anonymous distributed and secure networking. The main focus of this work is gap,
        a simple protocol for anonymous transfer of data which can achieve better
        anonymity guarantees than many traditional indirection schemes and is
        additionally more efficient. gap is based on a new perspective on how to achieve
        anonymity. Based on this new perspective it is possible to relax the requirements
        stated in traditional indirection schemes, allowing individual nodes to balance
        anonymity with efficiency according to their specific needs}, 
  www_section = {anonymity, GNUnet, installation}, 
  url = {http://grothoff.org/christian/aff.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/aff.pdf}, 
}
garbacki062fast
@conference{garbacki062fast,
  title = {2Fast: Collaborative Downloads in P2P Networks}, 
  author = {Garbacki, Pawel and Alexandru Iosup and Epema, Dick H. J. and van Steen,
        Maarten}, 
  booktitle = {P2P 2006. 6th IEEE International Conference on Peer-to-Peer Computing}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  month = {September}, 
  address = {Cambridge, UK}, 
  publisher = {IEEE Computer Society}, 
  abstract = {P2P systems that rely on the voluntary contribution of bandwidth by the
        individual peers may suffer from free riding. To address this problem, mechanisms
        enforcing fairness in bandwidth sharing have been designed, usually by limiting
        the download bandwidth to the available upload bandwidth. As in real environments
        the latter is much smaller than the former, these mechanisms severely affect the
        download performance of most peers. In this paper we propose a system called
        2Fast, which solves this problem while preserving the fairness of bandwidth
        sharing. In 2Fast, we form groups of peers that collaborate in downloading a file
        on behalf of a single group member, which can thus use its full download
        bandwidth. A peer in our system can use its currently idle bandwidth to help
        other peers in their ongoing downloads, and get in return help during its own
        downloads. We assess the performance of 2Fast analytically and experimentally,
        the latter in both real and simulated environments. We find that in realistic
        bandwidth limit settings, 2Fast improves the download speed by up to a factor of
        3.5 in comparison to state-of-the-art P2P download protocols}, 
  www_section = {2fast, bandwidth sharing, collaborative download, free-riding, P2P, p2p
        network, peer-to-peer networking}, 
  isbn = {0-7695-2679-9}, 
  doi = {10.1109/P2P.2006.1}, 
  url = {http://www.arnetminer.org/viewpub.do?pid=525534}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Computer\%20Society\%20-\%202Fast.pdf},
}
gauger2011lj
@article{gauger2011lj,
  title = {Performance Regression Monitoring with Gauger}, 
  author = {Polot, Bartlomiej and Christian Grothoff}, 
  journal = {Linux Journal}, 
  number = {209}, 
  year = {2011}, 
  month = {September}, 
  pages = {68}, 
  www_section = {Gauger, GNUnet}, 
  url = {http://www.linuxjournaldigital.com/linuxjournal/201109$\#$pg68}, 
}
george-thesis
@phdthesis{george-thesis,
  title = {Better Anonymous Communications}, 
  author = {George Danezis}, 
  school = {University of Cambridge}, 
  year = {2004}, 
  month = {July}, 
  type = {{PhD} thesis}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.58.3200}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/george-thesis.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
gkantsidis:network
@conference{gkantsidis:network,
  title = {Network coding for large scale content distribution}, 
  author = {Christos Gkantsidis and Pablo Rodriguez}, 
  booktitle = {INFOCOM'05. Proceedings of the 24th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = {March}, 
  address = {Miami, FL, USA}, 
  pages = {2235--2245}, 
  publisher = {IEEE Computer Society}, 
  abstract = {We propose a new scheme for content distribution of large files that is based
        on network coding. With network coding, each node of the distribution network is
        able to generate and transmit encoded blocks of information. The randomization
        introduced by the coding process eases the scheduling of block propagation, and,
        thus, makes the distribution more efficient. This is particularly important in
        large unstructured overlay networks, where the nodes need to make block
        forwarding decisions based on local information only. We compare network coding
        to other schemes that transmit unencoded information (i.e. blocks of the original
        file) and, also, to schemes in which only the source is allowed to generate and
        transmit encoded packets. We study the performance of network coding in
        heterogeneous networks with dynamic node arrival and departure patterns,
        clustered topologies, and when incentive mechanisms to discourage free-riding are
        in place. We demonstrate through simulations of scenarios of practical interest
        that the expected file download time improves by more than 20-30\% with network
        coding compared to coding at the server only and, by more than 2-3 times compared
        to sending unencoded information. Moreover, we show that network coding improves
        the robustness of the system and is able to smoothly handle extreme situations
        where the server and nodes leave the system}, 
  www_section = {large scale content distribution, network coding}, 
  isbn = {0-7803-8968-9}, 
  doi = {10.1109/INFCOM.2005.1498511}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Network\%20coding\%20for\%20large\%20scale\%20content\%20distribution.pdf},
}
gnunettransport
@conference{gnunettransport,
  title = {A Transport Layer Abstraction for Peer-to-Peer Networks}, 
  author = {Ronaldo A. Ferreira and Christian Grothoff and Paul Ruth}, 
  booktitle = {Proceedings of the 3rd International Symposium on Cluster Computing and the
        Grid (GRID 2003)}, 
  organization = {IEEE Computer Society}, 
  year = {2003}, 
  pages = {398--403}, 
  publisher = {IEEE Computer Society}, 
  abstract = {The initially unrestricted host-to-host communication model provided by the
        Internet Protocol has deteriorated due to political and technical changes caused
        by Internet growth. While this is not a problem for most client-server
        applications, peer-to-peer networks frequently struggle with peers that are only
        partially reachable. We describe how a peer-to-peer framework can hide diversity
        and obstacles in the underlying Internet and provide peer-to-peer applications
        with abstractions that hide transport specific details. We present the details of
        an implementation of a transport service based on SMTP. Small-scale benchmarks
        are used to compare transport services over UDP, TCP, and SMTP}, 
  www_section = {GNUnet, P2P}, 
  url = {http://grothoff.org/christian/transport.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/transport.pdf}, 
}
gnunset-psyc2013
@mastersthesis{gnunset-psyc2013,
  title = {Design of a Social Messaging System Using Stateful Multicast}, 
  author = {Gabor X Toth}, 
  school = {University of Amsterdam}, 
  volume = {M.Sc}, 
  year = {2013}, 
  address = {Amsterdam}, 
  pages = {0--76}, 
  type = {Master's}, 
  abstract = {This work presents the design of a social messaging service for the GNUnet
        peer-to-peer framework that offers scalability, extensibility, and end-to-end
        encrypted communication. The scalability property is achieved through multicast
        message delivery, while extensibility is made possible by using PSYC (Protocol
        for SYnchronous Communication), which provides an extensible RPC (Remote
        Procedure Call) syntax that can evolve over time without having to upgrade the
        software on all nodes in the network. Another key feature provided by the PSYC
        layer are stateful multicast channels, which are used to store e.g. user
        profiles. End-to-end encrypted communication is provided by the mesh service of
        GNUnet, upon which the multicast channels are built. Pseudonymous users and
        social places in the system have cryptographical identities --- identified by
        their public key --- these are mapped to human memorable names using GNS (GNU
        Name System), where each pseudonym has a zone pointing to its places}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gnunet-psyc.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
goldberg-2007
@conference{goldberg-2007,
  title = {Improving the Robustness of Private Information Retrieval}, 
  author = {Ian Goldberg}, 
  booktitle = {Proceedings of the 2007 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2007}, 
  month = {May}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {Since 1995, much work has been done creating protocols for private
        information retrieval (PIR). Many variants of the basic PIR model have been
        proposed, including such modifications as computational vs. information-theoretic
        privacy protection, correctness in the face of servers that fail to respond or
        that respond incorrectly, and protection of sensitive data against the database
        servers themselves. In this paper, we improve on the robustness of PIR in a
        number of ways. First, we present a Byzantine-robust PIR protocol which provides
        information-theoretic privacy protection against coalitions of up to all but one
        of the responding servers, improving the previous result by a factor of 3. In
        addition, our protocol allows for more of the responding servers to return
        incorrect information while still enabling the user to compute the correct
        result. We then extend our protocol so that queries have information-theoretic
        protection if a limited number of servers collude, as before, but still retain
        computational protection if they all collude. We also extend the protocol to
        provide information-theoretic protection to the contents of the database against
        collusions of limited numbers of the database servers, at no additional
        communication cost or increase in the number of servers. All of our protocols
        retrieve a block of data with communication cost only {$O(\ell)$} times the size of
        the block, where $\ell$ is the number of servers}, 
  www_section = {private information retrieval, robustness}, 
  isbn = {0-7695-2848-1}, 
  doi = {10.1109/SP.2007.23}, 
  url = {http://portal.acm.org/citation.cfm?id=1264203}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/goldberg-2007.pdf}, 
}
golle:ccs2004
@conference{golle:ccs2004,
  title = {Parallel Mixing}, 
  author = {Philippe Golle and Ari Juels}, 
  booktitle = {Proceedings of the 11th ACM Conference on Computer and Communications
        Security (CCS 2004)}, 
  organization = {ACM Press}, 
  year = {2004}, 
  month = {October}, 
  address = {Washington DC, USA}, 
  publisher = {ACM Press}, 
  abstract = {Efforts to design faster synchronous mix networks have focused on reducing
        the computational cost of mixing per server. We propose a different approach: our
        reencryption mixnet allows servers to mix inputs in parallel. The result is a
        dramatic reduction in overall mixing time for moderate-to-large numbers of
        servers. As measured in the model we describe, for $n$ inputs and $M$ servers our
        parallel re-encryption mixnet produces output in time at most $2n$ -- and only
        around $n$ assuming a majority of honest servers. In contrast, a traditional,
        sequential, synchronous re-encryption mixnet requires time $Mn$. Parallel
        re-encryption mixnets offer security guarantees comparable to those of
        synchronous mixnets, and in many cases only a slightly weaker guarantee of
        privacy. Our proposed construction is applicable to many recently proposed
        re-encryption mixnets, such as those of Furukawa and Sako, Neff, Jakobsson et
        al., and Golle and Boneh. In practice, parallel mixnets promise a potentially
        substantial time saving in applications such as anonymous electronic elections}, 
  www_section = {anonymity, privacy}, 
  isbn = {1-58113-961-6}, 
  doi = {10.1145/1030083.1030113}, 
  url = {http://portal.acm.org/citation.cfm?id=1030113}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-ccs2004.pdf}, 
}
golle:eurocrypt2004
@conference{golle:eurocrypt2004,
  title = {Dining Cryptographers Revisited}, 
  author = {Philippe Golle and Ari Juels}, 
  booktitle = {Proceedings of Eurocrypt 2004}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2004}, 
  month = {May}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Dining cryptographers networks (or DC-nets) are a privacy-preserving
        primitive devised by Chaum for anonymous message publication. A very attractive
        feature of the basic DC-net is its non-interactivity. Subsequent to key
        establishment, players may publish their messages in a single broadcast round,
        with no player-to-player communication. This feature is not possible in other
        privacy-preserving tools like mixnets. A drawback to DC-nets, however, is that
        malicious players can easily jam them, i.e., corrupt or block the transmission of
        messages from honest parties, and may do so without being traced. Several
        researchers have proposed valuable methods of detecting cheating players in
        DC-nets. This is usually at the cost, however, of multiple broadcast rounds, even
        in the optimistic case, and often of high computational and/or communications
        overhead, particularly for fault recovery. We present new DC-net constructions
        that simultaneously achieve non-interactivity and high-probability detection and
        identification of cheating players. Our proposals are quite efficient, imposing a
        basic cost that is linear in the number of participating players. Moreover, even
        in the case of cheating in our proposed system, just one additional broadcast
        round suffices for full fault recovery. Among other tools, our constructions
        employ bilinear maps, a recently popular cryptographic technique for reducing
        communication complexity}, 
  www_section = {anonymity, dining cryptographers, non-interactive, privacy}, 
  isbn = {978-3-540-21935-4}, 
  doi = {10.1007/b97182}, 
  url = {http://www.springerlink.com/content/ud2tb1fyk5m2ywlu/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-eurocrypt2004.pdf},
}
golle:pet2004
@conference{golle:pet2004,
  title = {Reputable Mix Networks}, 
  author = {Philippe Golle}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  volume = {3424}, 
  year = {2004}, 
  month = {May}, 
  pages = {51--63}, 
  series = {LNCS}, 
  abstract = {We define a new type of mix network that offers a reduced form of robustness:
        the mixnet can prove that every message it outputs corresponds to an input
        submitted by a player without revealing which input (for honest players). We call
        mixnets with this property reputable mixnets. Reputable mixnets are not fully
        robust, because they offer no guarantee that distinct outputs correspond to
        distinct inputs. In particular, a reputable mix may duplicate or erase messages.
        A reputable mixnet, however, can defend itself against charges of having authored
        the output messages it produces. This ability is very useful in practice, as it
        shields the mixnet from liability in the event that an output message is
        objectionable or illegal. We propose three very efficient protocols for reputable
        mixnets, all synchronous. The first protocol is based on blind signatures. It
        works both with Chaumian decryption mixnets or re-encryption mixnets based on
        ElGamal, but guarantees a slightly weaker form of reputability which we call
        near-reputability. The other two protocols are based on ElGamal re-encryption
        over a composite group and offer true reputability. One requires interaction
        between the mixnet and the players before players submit their inputs. The other
        assumes no interaction prior to input submission}, 
  www_section = {anonymity, privacy}, 
  isbn = {978-3-540-26203-9}, 
  doi = {10.1007/b136164}, 
  url = {http://www.springerlink.com/content/mqpu4nyljy82ca90/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/golle-pet2004.pdf}, 
}
gossipico2012
@conference{gossipico2012,
  title = {Gossip-based counting in dynamic networks}, 
  author = {Ruud van de Bovenkamp and Fernando Kuipers and Piet Van Mieghem}, 
  booktitle = {IFIP International Conferences on Networking (Networking 2012)}, 
  organization = {Springer Verlag}, 
  year = {2012}, 
  month = {May}, 
  address = {Prague, CZ}, 
  pages = {404--419}, 
  publisher = {Springer Verlag}, 
  www_section = {network size estimation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Gossipico.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
grothoff2011syssec
@conference{grothoff2011syssec,
  title = {The Free Secure Network Systems Group: Secure Peer-to-Peer Networking and
        Beyond}, 
  author = {Christian Grothoff}, 
  booktitle = {SysSec 2011}, 
  year = {2011}, 
  address = {Amsterdam, Netherlands}, 
  abstract = {This paper introduces the current research and future plans of the Free
        Secure Network Systems Group at the Technische Universit{\"a}t
        M{\"u}nchen. In particular, we provide some insight into the development
        process and architecture of the GNUnet P2P framework and the challenges we are
        currently working on}, 
  www_section = {anonymity, GNUnet, routing}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/syssec2011.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
guha05characterization
@conference{guha05characterization,
  title = {Characterization and Measurement of {TCP} Traversal through {NATs} and Firewalls}, 
  author = {Saikat Guha and Paul Francis}, 
  booktitle = {IMC'05: Proceedings of the 5th ACM SIGCOMM Conference on Internet
        Measurement}, 
  year = {2005}, 
  abstract = {In recent years, the standards community has developed techniques for
        traversing NAT/firewall boxes with UDP (that is, establishing UDP flows between
        hosts behind NATs). Because of the asymmetric nature of TCP connection
        establishment, however, NAT traversal of TCP is more difficult. Researchers have
        recently proposed a variety of promising approaches for TCP NAT traversal. The
        success of these approaches, however, depend on how NAT boxes respond to various
        sequences of TCP (and ICMP) packets. This paper presents the first broad study of
        NAT behavior for a comprehensive set of TCP NAT traversal techniques over a wide
        range of commercial NAT products. We developed a publicly available software test
        suite that measures the NAT's responses both to a variety of isolated probes and
        to complete TCP connection establishments. We test sixteen NAT products in the
        lab, and 93 home NATs in the wild. Using these results, as well as market data
        for NAT products, we estimate the likelihood of successful NAT traversal for home
        networks. The insights gained from this paper can be used to guide both design of
        TCP NAT traversal protocols and the standardization of NAT/firewall behavior,
        including the IPv4-IPv6 translating NATs critical for IPv6 transition}, 
  www_section = {firewall, NAT}, 
  url = {http://portal.acm.org/citation.cfm?id=1251086.1251104}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/imc05-tcpnat.pdf}, 
}
guha6ess
@conference{guha6ess,
  title = {An Experimental Study of the Skype Peer-to-Peer VoIP System}, 
  author = {Saikat Guha and Daswani, Neil and Jain, Ravi}, 
  booktitle = {IPTPS'06--Proceedings of The 5th International Workshop on Peer-to-Peer
        Systems}, 
  year = {2006}, 
  month = {February}, 
  address = {Santa Barbara, CA, USA}, 
  pages = {1--6}, 
  abstract = {Despite its popularity, relatively little is known about the traffic
        characteristics of the Skype VoIP system and how they differ from other P2P
        systems. We describe an experimental study of Skype VoIP traffic conducted over a
        one month period, where over 30 million datapoints were collected regarding the
        population of online clients, the number of supernodes, and their traffic
        characteristics. The results indicate that although the structure of the Skype
        system appears to be similar to other P2P systems, particularly KaZaA, there are
        several significant differences in traffic. The number of active clients shows
        diurnal and work-week behavior, correlating with normal working hours regardless
        of geography. The population of supernodes in the system tends to be relatively
        stable; thus node churn, a significant concern in other systems, seems less
        problematic in Skype. The typical bandwidth load on a supernode is relatively
        low, even if the supernode is relaying VoIP traffic. The paper aims to aid
        further understanding of a significant, successful P2P VoIP system, as well as
        provide experimental data that may be useful for design and modeling of such
        systems. These results also imply that the nature of a VoIP P2P system like Skype
        differs fundamentally from earlier P2P systems that are oriented toward
        file-sharing, and music and video download applications, and deserves more
        attention from the research community}, 
  www_section = {decentralized, indexing, overlay, P2P, skype, unstructured}, 
  url = {http://saikat.guha.cc/pub/iptps06-skype/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2706\%20-\%20Skype\%20p2p\%20VoIP\%20System.pdf},
}
gup
@conference{gup,
  title = {Authentic Attributes with Fine-Grained Anonymity Protection}, 
  author = {Stuart Stubblebine and Paul Syverson}, 
  booktitle = {Proceedings of Financial Cryptography (FC 2000)}, 
  organization = {Springer-Verlag, LNCS 1962}, 
  year = {2001}, 
  pages = {276--294}, 
  editor = {Yair Frankel}, 
  publisher = {Springer-Verlag, LNCS 1962}, 
  abstract = {Collecting accurate profile information and protecting an individual's
        privacy are ordinarily viewed as being at odds. This paper presents mechanisms
        that protect individual privacy while presenting accurate-indeed
        authenticated-profile information to servers and merchants. In particular, we
        give a pseudonym registration scheme and system that enforces unique user
        registration while separating trust required of registrars, issuers, and
        validators. This scheme enables the issuance of global unique pseudonyms (GUPs)
        and attributes enabling practical applications such as authentication of accurate
        attributes and enforcement of
        {\textquotedblleft}one-to-a-customer{\textquotedblright} properties. We also
        present a scheme resilient to even pseudonymous profiling yet preserving the
        ability of merchants to authenticate the accuracy of information. It is the first
        mechanism of which the authors are aware to guarantee recent validity for group
        signatures, and more generally multi-group signatures, thus effectively enabling
        revocation of all or some of the multi-group certificates held by a principal}, 
  www_section = {privacy, pseudonym}, 
  isbn = {978-3-540-42700-1}, 
  doi = {10.1007/3-540-45472-1}, 
  url = {http://portal.acm.org/citation.cfm?id=728483}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/gup.pdf}, 
}
halpern-oneill-2003
@article{halpern-oneill-2003,
  title = {Anonymity and Information Hiding in Multiagent Systems}, 
  author = {Joseph Y. Halpern and Kevin R. O'Neill}, 
  journal = {Journal of Computer Security}, 
  volume = {13}, 
  year = {2004}, 
  pages = {483--514}, 
  abstract = {We provide a framework for reasoning about information-hiding requirements in
        multiagent systems and for reasoning about anonymity in particular. Our framework
        employs the modal logic of knowledge within the context of the runs and systems
        framework, much in the spirit of our earlier work on secrecy [13]. We give
        several definitions of anonymity with respect to agents, actions and observers in
        multiagent systems, and we relate our definitions of anonymity to other
        definitions of information hiding, such as secrecy. We also give probabilistic
        definitions of anonymity that are able to quantify an observer's uncertainty
        about the state of the system. Finally, we relate our definitions of anonymity to
        other formalizations of anonymity and information hiding, including definitions
        of anonymity in the process algebra CSP and definitions of information hiding
        using function views}, 
  www_section = {anonymity, epistemic logic, formal methods}, 
  issn = {0926-227X}, 
  url = {http://portal.acm.org/citation.cfm?id=1145953}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/halpern-oneill-2003.pdf},
}
handigol2012reproducible
@article{handigol2012reproducible,
  title = {Reproducible network experiments using container based emulation}, 
  author = {Handigol, N. and Heller, B. and Jeyakumar, V. and Lantz, B. and McKeown, N.}, 
  journal = {Proc. CoNEXT}, 
  year = {2012}, 
  www_section = {emulation, mininet, network, virtualization}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mininet-hifi.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
herbivore:tr
@booklet{herbivore:tr,
  title = {Herbivore: A Scalable and Efficient Protocol for Anonymous Communication}, 
  author = {Goel, Sharad and Mark Robson and Milo Polte and Emin G{\"u}n Sirer}, 
  number = {2003-1890}, 
  year = {2003}, 
  month = {February}, 
  address = {Ithaca, NY}, 
  publisher = {Cornell University}, 
  abstract = {Anonymity is increasingly important for networked applications amidst
        concerns over censorship and privacy. In this paper, we describe Herbivore, a
        peer-to-peer, scalable, tamper-resilient communication system that provides
        provable anonymity and privacy. Building on dining cryptographer networks,
        Herbivore scales by partitioning the network into anonymizing cliques.
        Adversaries able to monitor all network traffic cannot deduce the identity of a
        sender or receiver beyond an anonymizing clique. In addition to strong anonymity,
        Herbivore simultaneously provides high efficiency and scalability, distinguishing
        it from other anonymous communication protocols. Performance measurements from a
        prototype implementation show that the system can achieve high bandwidths and low
        latencies when deployed over the Internet}, 
  www_section = {anonymity, P2P, privacy}, 
  url = {http://ecommons.cornell.edu/handle/1813/5606}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herbivore-tr.pdf}, 
}
herrmann2010pet
@conference{herrmann2010pet,
  title = {Privacy-Implications of Performance-Based Peer Selection by Onion-Routers: A
        Real-World Case Study using I2P}, 
  author = {Michael Herrmann and Christian Grothoff}, 
  booktitle = {Privacy Enhancing Technologies Symposium (PETS 2011)}, 
  organization = {Springer Verlag}, 
  year = {2011}, 
  month = {April}, 
  address = {Waterloo, Canada}, 
  publisher = {Springer Verlag}, 
  abstract = {I2P is one of the most widely used anonymizing Peer-to-Peer networks on the
        Internet today. Like Tor, it uses onion routing to build tunnels between peers as
        the basis for providing anonymous communication channels. Unlike Tor, I2P
        integrates a range of anonymously hosted services directly with the platform.
        This paper presents a new attack on the I2P Peer-to-Peer network, with the goal
        of determining the identity of peers that are anonymously hosting HTTP services
        (Eepsite) in the network. Key design choices made by I2P developers, in
        particular performance-based peer selection, enable a sophisticated adversary
        with modest resources to break key security assumptions. Our attack first obtains
        an estimate of the victim's view of the network. Then, the adversary selectively
        targets a small number of peers used by the victim with a denial-of-service
        attack while giving the victim the opportunity to replace those peers with other
        peers that are controlled by the adversary. Finally, the adversary performs some
        simple measurements to determine the identity of the peer hosting the service.
        This paper provides the necessary background on I2P, gives details on the attack
        --- including experimental data from measurements against the actual I2P network
        --- and discusses possible solutions}, 
  www_section = {anonymity, attack, Guard, I2P, onion routing}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet2011i2p.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
heydt-benjamin:pet2006
@conference{heydt-benjamin:pet2006,
  title = {Privacy for Public Transportation}, 
  author = {Thomas S. Heydt-Benjamin and Hee-Jin Chae and Benessa Defend and Kevin Fu}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = {June}, 
  address = {Cambridge, UK}, 
  pages = {1--19}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {We propose an application of recent advances in e-cash, anonymous
        credentials, and proxy re-encryption to the problem of privacy in public transit
        systems with electronic ticketing. We discuss some of the interesting features of
        transit ticketing as a problem domain, and provide an architecture sufficient for
        the needs of a typical metropolitan transit system. Our system maintains the
        security required by the transit authority and the user while significantly
        increasing passenger privacy. Our hybrid approach to ticketing allows use of
        passive RFID transponders as well as higher powered computing devices such as
        smartphones or PDAs. We demonstrate security and privacy features offered by our
        hybrid system that are unavailable in a homogeneous passive transponder
        architecture, and which are advantageous for users of passive as well as active
        devices}, 
  www_section = {anonymity, privacy, re-encryption}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/c75053mr42n82wv5/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/heydt-benjamin-pet2006.pdf},
}
hintz02
@conference{hintz02,
  title = {Fingerprinting Websites Using Traffic Analysis}, 
  author = {Andrew Hintz}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)}, 
  organization = {Springer-Verlag, LNCS 2482}, 
  year = {2002}, 
  month = {April}, 
  editor = {Roger Dingledine and Paul Syverson}, 
  publisher = {Springer-Verlag, LNCS 2482}, 
  abstract = {I present a traffic analysis based vulnerability in Safe Web, an encrypting
        web proxy. This vulnerability allows someone monitoring the traffic of a Safe Web
        user to determine if the user is visiting certain websites. I also describe a
        successful implementation of the attack. Finally, I discuss methods for improving
        the attack and for defending against the attack}, 
  www_section = {traffic analysis, vulnerability}, 
  isbn = {978-3-540-00565-0}, 
  doi = {10.1007/3-540-36467-6}, 
  url = {http://www.springerlink.com/content/c4qwe6d608p2cjyv/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hintz02.pdf}, 
}
hitting-set04
@conference{hitting-set04,
  title = {The Hitting Set Attack on Anonymity Protocols}, 
  author = {Dogan Kesdogan and Lexi Pimenidis}, 
  booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)}, 
  year = {2004}, 
  month = {May}, 
  address = {Toronto}, 
  series = {LNCS}, 
  abstract = {A passive attacker can compromise a generic anonymity protocol by applying
        the so called disclosure attack, i.e. a special traffic analysis attack. In this
        work we present a more efficient way to accomplish this goal, i.e. we need less
        observations by looking for unique minimal hitting sets. We call this the hitting
        set attack or just HS-attack. In general, solving the minimal hitting set problem
        is NP-hard. Therefore, we use frequency analysis to enhance the applicability of
        our attack. It is possible to apply highly efficient backtracking search
        algorithms. We call this approach the statistical hitting set attack or
        SHS-attack. However, the statistical hitting set attack is prone to wrong
        solutions with a given small probability. We use here duality checking algorithms
        to resolve this problem. We call this final exact attack the HS*-attack}, 
  www_section = {anonymity, hitting set attack, traffic analysis}, 
  doi = {10.1007/b104759}, 
  url = {http://www.springerlink.com/content/t6bkk4tyjvr71m55/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hitting-set04.pdf}, 
}
hs-attack06
@conference{hs-attack06,
  title = {Locating Hidden Servers}, 
  author = {Lasse {\O}verlier and Paul Syverson}, 
  booktitle = {Proceedings of the 2006 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE CS}, 
  year = {2006}, 
  month = {May}, 
  publisher = {IEEE CS}, 
  abstract = {Hidden services were deployed on the Tor anonymous communication network in
        2004. Announced properties include server resistance to distributed DoS. Both the
        EFF and Reporters Without Borders have issued guides that describe using hidden
        services via Tor to protect the safety of dissidents as well as to resist
        censorship. We present fast and cheap attacks that reveal the location of a
        hidden server. Using a single hostile Tor node we have located deployed hidden
        servers in a matter of minutes. Although we examine hidden services over Tor, our
        results apply to any client using a variety of anonymity networks. In fact, these
        are the first actual intersection attacks on any deployed public network: thus
        confirming general expectations from prior theory and simulation. We recommend
        changes to route selection design and implementation for Tor. These changes
        require no operational increase in network overhead and are simple to make; but
        they prevent the attacks we have demonstrated. They have been implemented}, 
  www_section = {anonymity measurement, Guard, Tor}, 
  isbn = {0-7695-2574-1}, 
  doi = {10.1109/SP.2006.24}, 
  url = {http://portal.acm.org/citation.cfm?id=1130366}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/hs-attack06.pdf}, 
}
hybrid-mix
@conference{hybrid-mix,
  title = {A Length-Invariant Hybrid MIX}, 
  author = {Miyako Ohkubo and Masayuki Abe}, 
  booktitle = {Proceedings of ASIACRYPT 2000}, 
  organization = {Springer-Verlag, LNCS 1976}, 
  year = {2000}, 
  publisher = {Springer-Verlag, LNCS 1976}, 
  abstract = {This paper presents a secure and flexible Mix-net that has the following
        properties; it efficiently handles long plaintexts that exceed the modulus size
        of underlying public-key encryption as well as very short ones (length-flexible),
        input ciphertext length is not impacted by the number of mix-servers
        (length-invariant), and its security in terms of anonymity is proven in a formal
        way (provably secure). One can also add robustness i.e. it outputs correct
        results in the presence of corrupt servers. The security is proved in the random
        oracle model by showing a reduction from breaking the anonymity of our Mix-net to
        breaking a sort of indistinguishability of the underlying symmetric encryption
        scheme or solving the Decision Diffie-Hellman problem}, 
  www_section = {hybrid encryption, mix}, 
  isbn = {3-540-41404-5}, 
  doi = {10.1007/3-540-44448-3_14}, 
  url = {http://portal.acm.org/citation.cfm?id=647096.716874}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.87.7718.pdf}, 
}
ian-thesis
@phdthesis{ian-thesis,
  title = {A Pseudonymous Communications Infrastructure for the Internet}, 
  author = {Ian Goldberg}, 
  school = {UC Berkeley}, 
  year = {2000}, 
  month = {December}, 
  type = {phd}, 
  abstract = {A Pseudonymous Communications Infrastructure for the Internet by Ian Avrum
        Goldberg Doctor of Philosophy in Computer Science University of California at
        Berkeley Professor Eric Brewer, Chair As more and more of people's everyday
        activities are being conducted online, there is an ever-increasing threat to
        personal privacy. Every communicative or commercial transaction you perform
        online reveals bits of information about you that can be compiled into large
        dossiers, often without your permission, or even your knowledge}, 
  www_section = {pseudonym}, 
  isbn = {0-493-10500-X}, 
  url = {http://portal.acm.org/citation.cfm?id=933285}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.3353.pdf}, 
}
icdcs2006:m2
@conference{icdcs2006:m2,
  title = {M2: Multicasting Mixes for Efficient and Anonymous Communication}, 
  author = {Ginger Perng and Michael K. Reiter and Chenxi Wang}, 
  booktitle = {Proceedings of the 26th IEEE Conference on Distributed Computing Systems}, 
  year = {2006}, 
  month = {July}, 
  abstract = {We present a technique to achieve anonymous multicasting in mix networks to
        deliver content from producers to consumers. Employing multicast allows content
        producers to send (and mixes to forward) information to multiple consumers
        without repeating work for each individual consumer. In our approach, consumers
        register interest for content by creating paths in the mix network to the
        content's producers. When possible, these paths are merged in the network so that
        paths destined for the same producer share a common path suffix to the producer.
        When a producer sends content, the content travels this common suffix toward its
        consumers (in the reverse direction) and "branches" into multiple messages when
        necessary. We detail the design of this technique and then analyze the
        unlinkability of our approach against a global, passive adversary who controls
        both the producer and some mixes. We show that there is a subtle degradation of
        unlinkability that arises from multicast. We discuss techniques to tune our
        design to mitigate this degradation while retaining the benefits of multicast}, 
  www_section = {anonymous multicast}, 
  isbn = {0-7695-2540-7}, 
  doi = {10.1109/ICDCS.2006.53}, 
  url = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel5\%2F10967\%2F34569\%2F01648846.pdf\%3Ftp\%3D\%26isnumber\%3D\%26arnumber\%3D1648846\&authDecision=-203},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/icdcs2006-m2.pdf}, 
}
idemix
@conference{idemix,
  title = {Design and implementation of the idemix anonymous credential system}, 
  author = {Jan Camenisch and Els Van Herreweghen}, 
  booktitle = {Proceedings of the 9th ACM conference on Computer and communications
        security (CCS 2002)}, 
  organization = {ACM Press}, 
  year = {2002}, 
  address = {New York, NY, USA}, 
  pages = {21--30}, 
  publisher = {ACM Press}, 
  abstract = {Anonymous credential systems [8, 9, 12, 24] allow anonymous yet authenticated
        and accountable transactions between users and service providers. As such, they
        represent a powerful technique for protecting users' privacy when conducting
        Internet transactions. In this paper, we describe the design and implementation
        of an anonymous credential system based on the protocols developed by [6]. The
        system is based on new high-level primitives and interfaces allowing for easy
        integration into access control systems. The prototype was realized in Java. We
        demonstrate its use and some deployment issues with the description of an
        operational demonstration scenario}, 
  www_section = {anonymity, anonymous credential system}, 
  isbn = {1-58113-612-9}, 
  doi = {http://doi.acm.org/10.1145/586110.586114}, 
  url = {http://portal.acm.org/citation.cfm?id=586114}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/idemix.pdf}, 
}
ih05-Klonowski
@conference{ih05-Klonowski,
  title = {Provable Anonymity for Networks of Mixes}, 
  author = {Marek Klonowski and Miroslaw Kutylowski}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2005)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  month = {June}, 
  pages = {26--38}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {We analyze networks of mixes used for providing untraceable communication. We
        consider a network consisting of k mixes working in parallel and exchanging the
        outputs -- which is the most natural architecture for composing mixes of a
        certain size into networks able to mix a larger number of inputs at once. We
        prove that after O(log k) rounds the network considered provides a fair level of
        privacy protection for any number of messages. No mathematical proof of this kind
        has been published before. We show that if at least one server is corrupted we
        need substantially more rounds to meet the same requirements of privacy
        protection}, 
  www_section = {anonymity, coupling, Markov chain, rapid mixing}, 
  isbn = {978-3-540-29039-1}, 
  doi = {10.1007/11558859}, 
  url = {http://www.springerlink.com/content/777769630v335773/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-Klonowski.pdf}, 
}
ih05-Luke
@conference{ih05-Luke,
  title = {On Blending Attacks For Mixes with Memory}, 
  author = {Luke O'Connor}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2005)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  month = {June}, 
  pages = {39--52}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Blending attacks are a general class of traffic-based attacks, exemplified by
        the (n--1)-attack. Adding memory or pools to mixes mitigates against such
        attacks, however there are few known quantitative results concerning the effect
        of pools on blending attacks. In this paper we give a precise analysis of the
        number of rounds required to perform an (n--1)-attack on the pool mix, timed pool
        mix, timed dynamic pool mix and the binomial mix}, 
  www_section = {mix, traffic analysis}, 
  isbn = {978-3-540-29039-1}, 
  doi = {10.1007/11558859}, 
  url = {http://www.springerlink.com/index/y78350424h77u578.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-Luke.pdf}, 
}
ih05-csispir
@conference{ih05-csispir,
  title = {Censorship Resistance Revisited}, 
  author = {Ginger Perng and Michael K. Reiter and Chenxi Wang}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2005)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  month = {June}, 
  pages = {62--76}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {{\textquotedblleft}Censorship resistant{\textquotedblright} systems attempt
        to prevent censors from imposing a particular distribution of content across a
        system. In this paper, we introduce a variation of censorship resistance (CR)
        that is resistant to selective filtering even by a censor who is able to inspect
        (but not alter) the internal contents and computations of each data server,
        excluding only the server's private signature key. This models a service provided
        by operators who do not hide their identities from censors. Even with such a
        strong adversarial model, our definition states that CR is only achieved if the
        censor must disable the entire system to filter selected content. We show that
        existing censorship resistant systems fail to meet this definition; that Private
        Information Retrieval (PIR) is necessary, though not sufficient, to achieve our
        definition of CR; and that CR is achieved through a modification of PIR for which
        known implementations exist}, 
  www_section = {censorship resistance, private information retrieval}, 
  isbn = {978-3-540-29039-1}, 
  doi = {10.1007/11558859}, 
  url = {http://www.springerlink.com/content/f08707qw34614340/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-csispir.pdf}, 
}
ih05-danezisclulow
@conference{ih05-danezisclulow,
  title = {Compulsion Resistant Anonymous Communications}, 
  author = {George Danezis and Jolyon Clulow}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2005)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  month = {June}, 
  pages = {11--25}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {We study the effect compulsion attacks, through which an adversary can
        request a decryption or key from an honest node, have on the security of mix
        based anonymous communication systems. Some specific countermeasures are proposed
        that increase the cost of compulsion attacks, detect that tracing is taking place
        and ultimately allow for some anonymity to be preserved even when all nodes are
        under compulsion. Going beyond the case when a single message is traced, we also
        analyze the effect of multiple messages being traced and devise some techniques
        that could retain some anonymity. Our analysis highlights that we can reason
        about plausible deniability in terms of the information theoretic anonymity
        metrics}, 
  www_section = {countermeasure, mix}, 
  isbn = {978-3-540-29039-1}, 
  doi = {10.1007/11558859}, 
  url = {http://www.springerlink.com/content/74461772r675l828/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ih05-danezisclulow.pdf},
}
improved-clockskew
@conference{improved-clockskew,
  title = {An Improved Clock-skew Measurement Technique for Revealing Hidden Services}, 
  author = {Sebastian Zander and Steven J. Murdoch}, 
  booktitle = {Proceedings of the 17th USENIX Security Symposium}, 
  organization = {USENIX Association Berkeley, CA, USA}, 
  year = {2008}, 
  month = {July}, 
  address = {San Jose, CA, US}, 
  publisher = {USENIX Association Berkeley, CA, USA}, 
  abstract = {The Tor anonymisation network allows services, such as web servers, to be
        operated under a pseudonym. In previous work Murdoch described a novel attack to
        reveal such hidden services by correlating clock skew changes with times of
        increased load, and hence temperature. Clock skew measurement suffers from two
        main sources of noise: network jitter and timestamp quantisation error. Depending
        on the target's clock frequency the quantisation noise can be orders of magnitude
        larger than the noise caused by typical network jitter. Quantisation noise limits
        the previous attacks to situations where a high frequency clock is available. It
        has been hypothesised that by synchronising measurements to the clock ticks,
        quantisation noise can be reduced. We show how such synchronisation can be
        achieved and maintained, despite network jitter. Our experiments show that
        synchronised sampling significantly reduces the quantisation error and the
        remaining noise only depends on the network jitter (but not clock frequency). Our
        improved skew estimates are up to two magnitudes more accurate for low-resolution
        timestamps and up to one magnitude more accurate for high-resolution timestamps,
        when compared to previous random sampling techniques. The improved accuracy not
        only allows previous attacks to be executed faster and with less network traffic
        but also opens the door to previously infeasible attacks on low-resolution
        clocks, including measuring skew of a HTTP server over the anonymous channel}, 
  www_section = {anonymity, pseudonym, Tor}, 
  url = {http://portal.acm.org/citation.cfm?id=1496726}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/improved-clockskew.pdf},
}
incentives-fc10
@conference{incentives-fc10,
  title = {Building Incentives into Tor}, 
  author = {Tsuen-Wan {\textquoteleft}{\textquoteleft}Johnny'' Ngan and Roger Dingledine
        and Dan S. Wallach}, 
  booktitle = {Proceedings of Financial Cryptography (FC '10)}, 
  year = {2010}, 
  month = {January}, 
  editor = {Radu Sion}, 
  abstract = {Distributed anonymous communication networks like Tor depend on volunteers to
        donate their resources. However, the efforts of Tor volunteers have not grown as
        fast as the demands on the Tor network. We explore techniques to incentivize Tor
        users to relay Tor traffic too; if users contribute resources to the Tor overlay,
        they should receive faster service in return. In our design, the central Tor
        directory authorities measure performance and publish a list of Tor relays that
        should be given higher priority when establishing circuits. Simulations of our
        proposed design show that conforming users receive significant improvements in
        performance, in some cases experiencing twice the network throughput of selfish
        users who do not relay traffic for the Tor network}, 
  www_section = {Tor}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incentives-fc10.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
incomparable-pkeys
@conference{incomparable-pkeys,
  title = {Receiver Anonymity via Incomparable Public Keys}, 
  author = {Waters, Brent and Edward W. Felten and Amit Sahai}, 
  booktitle = {Proceedings of the 10th ACM Conference on Computer and Communications
        Security (CCS 2003)}, 
  organization = {ACM Press}, 
  year = {2003}, 
  month = {October}, 
  pages = {112--121}, 
  editor = {Vijay Atluri and Peng Liu}, 
  publisher = {ACM Press}, 
  abstract = {We describe a new method for protecting the anonymity of message receivers in
        an untrusted network. Surprisingly, existing methods fail to provide the required
        level of anonymity for receivers (although those methods do protect sender
        anonymity). Our method relies on the use of multicast, along with a novel
        cryptographic primitive that we call an Incomparable Public Key cryptosystem,
        which allows a receiver to efficiently create many anonymous "identities" for
        itself without divulging that these separate "identities" actually refer to the
        same receiver, and without increasing the receiver's workload as the number of
        identities increases. We describe the details of our method, along with a
        prototype implementation}, 
  www_section = {anonymity, PGP, privacy, public key cryptography}, 
  isbn = {1-58113-738-9}, 
  doi = {10.1145/948109.948127}, 
  url = {http://portal.acm.org/citation.cfm?id=948127}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/incomparable-pkeys.pdf},
}
infranet
@conference{infranet,
  title = {Infranet: Circumventing Web Censorship and Surveillance}, 
  author = {Nick Feamster and Magdalena Balazinska and Greg Harfst and Hari Balakrishnan
        and David Karger}, 
  booktitle = {Proceedings of the 11th USENIX Security Symposium}, 
  organization = {USENIX Association Berkeley, CA, USA}, 
  year = {2002}, 
  month = {August}, 
  publisher = {USENIX Association Berkeley, CA, USA}, 
  abstract = {An increasing number of countries and companies routinely block or monitor
        access to parts of the Internet. To counteract these measures, we propose
        Infranet, a system that enables clients to surreptitiously retrieve sensitive
        content via cooperating Web servers distributed across the global Internet. These
        Infranet servers provide clients access to censored sites while continuing to
        host normal uncensored content. Infranet uses a tunnel protocol that provides a
        covert communication channel between its clients and servers, modulated over
        standard HTTP transactions that resemble innocuous Web browsing. In the upstream
        direction, Infranet clients send covert messages to Infranet servers by
        associating meaning to the sequence of HTTP requests being made. In the
        downstream direction, Infranet servers return content by hiding censored data in
        uncensored images using steganographic techniques. We describe the design, a
        prototype implementation, security properties, and performance of Infranet. Our
        security analysis shows that Infranet can successfully circumvent several
        sophisticated censoring techniques}, 
  www_section = {censorship resistance, infranet}, 
  isbn = {1-931971-00-5}, 
  url = {http://portal.acm.org/citation.cfm?id=720281}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/infranet.pdf}, 
}
ishai2006ca
@article{ishai2006ca,
  title = {Cryptography from Anonymity}, 
  author = {Yuval Ishai and Eyal Kushilevitz and Rafail Ostrovsky and Amit Sahai}, 
  journal = {Proceedings of the 47th Annual IEEE Symposium on Foundations of Computer
        Science (FOCS'06)-Volume 00}, 
  year = {2006}, 
  pages = {239--248}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {There is a vast body of work on implementing anonymous communication. In this
        paper, we study the possibility of using anonymous communication as a building
        block, and show that one can leverage on anonymity in a variety of cryptographic
        contexts. Our results go in two directions.--Feasibility. We show that anonymous
        communication over insecure channels can be used to implement unconditionally
        secure point-to-point channels, broadcast, and generalmulti-party protocols that
        remain unconditionally secure as long as less than half of the players are
        maliciously corrupted.--Efficiency. We show that anonymous channels can yield
        substantial efficiency improvements for several natural secure computation tasks.
        In particular, we present the first solution to the problem of private
        information retrieval (PIR) which can handle multiple users while being close to
        optimal with respect to both communication and computation. A key observation that
        underlies these results is that local randomization of inputs, via
        secret-sharing, when combined with the global mixing of the shares, provided by
        anonymity, allows to carry out useful computations on the inputs while keeping
        the inputs private}, 
  www_section = {anonymity, private information retrieval}, 
  isbn = {0-7695-2720-5}, 
  issn = {0272-5428}, 
  doi = {10.1109/FOCS.2006.25}, 
  url = {http://portal.acm.org/citation.cfm?id=1170505}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ishai2006ca.pdf}, 
}
izal:dissecting
@conference{izal:dissecting,
  title = {Dissecting BitTorrent: Five Months in a Torrent's Lifetime}, 
  author = {Mikel Izal and Guillaume Urvoy-Keller and E W Biersack and Pascal Felber and
        Anwar Al Hamra and L Garc{\'e}s-Erice}, 
  booktitle = {PAM '04. Proceedings of Passive and Active Measurements}, 
  organization = {Springer}, 
  volume = {3015}, 
  year = {2004}, 
  month = {April}, 
  address = {Antibes Juan-les-Pins, France}, 
  pages = {1--11}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Popular content such as software updates is requested by a large number of
        users. Traditionally, to satisfy a large number of requests, larger server farms
        or mirroring are used, both of which are expensive. An inexpensive alternative
        are peer-to-peer based replication systems, where users who retrieve the file,
        act simultaneously as clients and servers. In this paper, we study BitTorrent, a
        new and already very popular peer-to-peer application that allows distribution of
        very large contents to a large set of hosts. Our analysis of BitTorrent is based
        on measurements collected on a five months long period that involved thousands of
        peers}, 
  www_section = {BitTorrent, P2P, peer-to-peer networking, replication system}, 
  doi = {10.1007/978-3-540-24668-8_1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PAM\%2704\%20-\%20Dissecting\%20bittorrent.pdf},
}
jakobsson-optimally
@conference{jakobsson-optimally,
  title = {An Optimally Robust Hybrid Mix Network (Extended Abstract)}, 
  author = {Jakobsson, Markus and Ari Juels}, 
  booktitle = {Proceedings of Principles of Distributed Computing--{PODC} '01}, 
  organization = {ACM Press}, 
  year = {2001}, 
  publisher = {ACM Press}, 
  abstract = {We present a mix network that achieves efficient integration of public-key
        and symmetric-key operations. This hybrid mix network is capable of natural
        processing of arbitrarily long input elements, and is fast in both practical and
        asymptotic senses. While the overhead in the size of input elements is linear in
        the number of mix servers, it is quite small in practice. In contrast to previous
        hybrid constructions, ours has optimal robustness, that is, robustness against
        any minority coalition of malicious servers}, 
  www_section = {mix, public key cryptography, robustness}, 
  isbn = {1-58113-383-9}, 
  url = {http://portal.acm.org/citation.cfm?id=383962.384046}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.24.8205.pdf}, 
}
jayarama2015
@mastersthesis{jayarama2015,
  title = {Publish/Subscribe for Large-Scale Social Interaction: Design, Analysis and
        Resource Provisioning}, 
  author = {Vinay Jayarama Setty}, 
  school = {University of Oslo}, 
  volume = {Doctor of Philosophy}, 
  year = {2015}, 
  month = {March}, 
  www_section = {publish-subscribe, pubsub, social interaction, messaging, multicast}, 
  www_pdf_url = {https://www.duo.uio.no/bitstream/handle/10852/43117/1595-Setty-DUO-Thesis.pdf},
  www_tags = {selected}, 
}
journals/corr/abs-1109-0971
@article{journals/corr/abs-1109-0971,
  title = {X-Vine: Secure and Pseudonymous Routing Using Social Networks}, 
  author = {Prateek Mittal and Matthew Caesar and Borisov, Nikita}, 
  journal = {Computer Research Repository}, 
  volume = {abs/1109.0971}, 
  year = {2011}, 
  month = {September}, 
  abstract = {Distributed hash tables suffer from several security and privacy
        vulnerabilities, including the problem of Sybil attacks. Existing social
        network-based solutions to mitigate the Sybil attacks in DHT routing have a high
        state requirement and do not provide an adequate level of privacy. For instance,
        such techniques require a user to reveal their social network contacts. We design
        X-Vine, a protection mechanism for distributed hash tables that operates entirely
        by communicating over social network links. As with traditional peer-to-peer
        systems, X-Vine provides robustness, scalability, and a platform for innovation.
        The use of social network links for communication helps protect participant
        privacy and adds a new dimension of trust absent from previous designs. X-Vine is
        resilient to denial of service via Sybil attacks, and in fact is the first Sybil
        defense that requires only a logarithmic amount of state per node, making it
        suitable for large-scale and dynamic settings. X-Vine also helps protect the
        privacy of users social network contacts and keeps their IP addresses hidden from
        those outside of their social circle, providing a basis for pseudonymous
        communication. We first evaluate our design with analysis and simulations, using
        several real world large-scale social networking topologies. We show that the
        constraints of X-Vine allow the insertion of only a logarithmic number of Sybil
        identities per attack edge; we show this mitigates the impact of malicious
        attacks while not affecting the performance of honest nodes. Moreover, our
        algorithms are efficient, maintain low stretch, and avoid hot spots in the
        network. We validate our design with a PlanetLab implementation and a Facebook
        plugin}, 
  www_section = {anonymity, cryptography, dblp, distributed hash table, for:isp, routing,
        security, social-network-routing}, 
  url = {http://dblp.uni-trier.de/db/journals/corr/corr1109.html$\#$abs-1109-0971}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/CoRR\%20-\%20X-Vine.pdf},
}
karger77
@mastersthesis{karger77,
  title = {Non-Discretionary Access Control for Decentralized Computing Systems}, 
  author = {Paul A. Karger}, 
  school = {Laboratory for Computer Science, Massachusetts Institute of Technology}, 
  number = {MIT/LCS/TR-179}, 
  year = {1977}, 
  month = {May}, 
  address = {Cambridge, MA}, 
  type = {S. M. \& E. E. thesis}, 
  abstract = {This thesis examines the issues relating to non-discretionary access controls
        for decentralized computing systems. Decentralization changes the basic character
        of a computing system from a set of processes referencing a data base to a set of
        processes sending and receiving messages. Because messages must be acknowledged,
        operations that were read-only in a centralized system become read-write
        operations. As a result, the lattice model of non-discretionary access control,
        which mediates operations based on read versus read-write considerations, does
        not allow direct transfer of algorithms from centralized systems to decentralized
        systems. This thesis develops new mechanisms that comply with the lattice model
        and provide the necessary functions for effective decentralized computation.
        Secure protocols at several different levels are presented in the thesis. At the
        lowest level, a host or host protocol is shown that allows communication between
        hosts with effective internal security controls. Above this level, a host
        independent naming scheme is presented that allows generic naming of services in
        a manner consistent with the lattice model. The use of decentralized processing
        to aid in the downgrading of information is shown in the design of a secure
        intelligent terminal. Schemes are presented to deal with the decentralized
        administration of the lattice model, and with the proliferation of access classes
        as the user community of a decentralized system become more diverse. Limitations
        in the use of end-to-end encryption when used with the lattice model are
        identified, and a scheme is presented to relax these limitations for broadcast
        networks. Finally, a scheme is presented for forwarding authentication
        information between hosts on a network, without transmitting passwords (or their
        equivalent) over a network}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/MIT-LCS-TR-179.pdf}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
kesdogan:pet2002
@conference{kesdogan:pet2002,
  title = {Unobservable Surfing on the World Wide Web: Is Private Information Retrieval an
        alternative to the MIX based Approach?}, 
  author = {Dogan Kesdogan and Mark Borning and Michael Schmeink}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)}, 
  organization = {Springer-Verlag, LNCS 2482}, 
  year = {2002}, 
  month = {April}, 
  editor = {Roger Dingledine and Paul Syverson}, 
  publisher = {Springer-Verlag, LNCS 2482}, 
  abstract = {The technique Private Information Retrieval (PIR) perfectly protects a user's
        access pattern to a database. An attacker cannot observe (or determine) which
        data element is requested by a user and so cannot deduce the interest of the
        user. We discuss the application of PIR on the World Wide Web and compare it to
        the MIX approach. We demonstrate particularly that in this context the method
        does not provide perfect security, and we give a mathematical model for the
        amount of information an attacker could obtain. We provide an extension of the
        method under which perfect security can still be achieved}, 
  www_section = {private information retrieval}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.80.7678}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PIR_Kesdogan.pdf}, 
}
kevin-thesis
@mastersthesis{kevin-thesis,
  title = {Improving Security and Performance in Low Latency Anonymity Networks}, 
  author = {Kevin Bauer}, 
  school = {University of Colorado}, 
  year = {2011}, 
  month = {May}, 
  pages = {0--240}, 
  type = {PhD}, 
  abstract = {Conventional wisdom dictates that the level of anonymity offered by low
        latency anonymity networks increases as the user base grows. However, the most
        significant obstacle to increased adoption of such systems is that their security
        and performance properties are perceived to be weak. In an effort to help foster
        adoption, this dissertation aims to better understand and improve security,
        anonymity, and performance in low latency anonymous communication systems. To
        better understand the security and performance properties of a popular low
        latency anonymity network, we characterize Tor, focusing on its application
        protocol distribution, geopolitical client and router distributions, and
        performance. For instance, we observe that peer-to-peer file sharing protocols
        use an unfair portion of the network's scarce bandwidth. To reduce the congestion
        produced by bulk downloaders in networks such as Tor, we design, implement, and
        analyze an anonymizing network tailored specifically for the BitTorrent
        peer-to-peer file sharing protocol. We next analyze Tor's security and anonymity
        properties and empirically show that Tor is vulnerable to practical end-to-end
        traffic correlation attacks launched by relatively weak adversaries that inflate
        their bandwidth claims to attract traffic and thereby compromise key positions on
        clients' paths. We also explore the security and performance trade-offs that
        revolve around path length design decisions and we show that shorter paths offer
        performance benefits and provide increased resilience to certain attacks.
        Finally, we discover a source of performance degradation in Tor that results from
        poor congestion and flow control. To improve Tor's performance and grow its user
        base, we offer a fresh approach to congestion and flow control inspired by
        techniques from IP and ATM networks}, 
  www_section = {low latency anonymous networks, performance, security}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kevin-thesis.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
kising07proxselectigor
@mastersthesis{kising07proxselectigor,
  title = {Proximity Neighbor Selection and Proximity Route Selection for the
        Overlay-Network IGOR}, 
  author = {Yves Philippe Kising}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {Computer Science}, 
  year = {2007}, 
  month = {June}, 
  address = {Munich, Germany}, 
  pages = {0--79}, 
  type = {Diplomarbeit}, 
  abstract = {Unfortunately, from all known "Distributed Hash Table"-based overlay networks
        only a few of them relate to proximity in terms of latency. So a query routing
        can come with high latency when very distant hops are used. One can imagine hops
        are from one continent to the other in terms of here and back. Thereby it is
        possible that the target node is located close to the requesting node. Such cases
        increase query latency to a great extent and are responsible for performance
        bottlenecks of a query routing. There exist two main strategies to reduce latency
        in the query routing process: Proximity Neighbor Selection and Proximity Route
        Selection. As a new proposal of PNS for the IGOR overlay network, Merivaldi is
        developed. Merivaldi represents a combination of two basic ideas: The first idea
        is the Meridian framework and its Closest-Node-Discovery without synthetic
        coordinates. The second idea is Vivaldi, a distributed algorithm for predicting
        Internet latency between arbitrary Internet hosts. Merivaldi is quite similar to
        Meridian. It differs in using no direct Round Trip Time measurements like
        Meridian does to obtain latency characteristics between hosts. Merivaldi obtains
        latency characteristics of nodes using the latency prediction derived from the
        Vivaldi-coordinates. A Merivaldi-node forms exponentially growing latency-rings,
        i.e., the rings correspond to latency distances to the Merivaldi-node itself. In
        these rings node-references are inserted with regard to their latency
        characteristics. These node-references are obtained through a special protocol. A
        Merivaldi-node finds latency-closest nodes through periodic querying its
        ring-members for closer nodes. If a closer node is found by a ring-member the
        query is forwarded to this one until no closer one can be found. The closest on
        this way reports itself to the Merivaldi-node. Exemplary analysis show that
        Merivaldi means only a modest burden for the network. Merivaldi uses O(log N)
        CND-hops at maximum to recognize a closest node, where N is the number of nodes.
        Empirical tests demonstrate this analysis. Analysis shows, the overhead for a
        Merivaldi-node is modest. It is shown that Merivaldi's Vivaldi works with high
        quality with the used PING-message type}, 
  www_section = {IGOR, neighbor selection, overlay-network, proximity route selection}, 
  url = {http://i30www.ira.uka.de/teaching/theses/pasttheses/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Kising\%20-\%20Proximity\%20Neighbor\%20Selection\%20for\%20IGOR.pdf},
}
kissner04private
@article{kissner04private,
  title = {Private keyword-based push and pull with applications to anonymous
        communication}, 
  author = {Lea Kissner and Alina Oprea and Michael K. Reiter and Dawn Xiaodong Song and Ke
        Yang}, 
  journal = {Applied Cryptography and Network Security}, 
  year = {2004}, 
  abstract = {We propose a new keyword-based Private Information Retrieval (PIR) model that
        allows private modification of the database from which information is requested.
        In our model, the database is distributed over n servers, any one of which can
        act as a transparent interface for clients. We present protocols that support
        operations for accessing data, focusing on privately appending labelled records
        to the database (push) and privately retrieving the next unseen record appended
        under a given label (pull). The communication complexity between the client and
        servers is independent of the number of records in the database (or more
        generally, the number of previous push and pull operations) and of the number of
        servers. Our scheme also supports access control oblivious to the database
        servers by implicitly including a public key in each push, so that only the party
        holding the private key can retrieve the record via pull. To our knowledge, this
        is the first system that achieves the following properties: private database
        modification, private retrieval of multiple records with the same keyword, and
        oblivious access control. We also provide a number of extensions to our protocols
        and, as a demonstrative application, an unlinkable anonymous communication
        service using them}, 
  www_section = {distributed database, private information retrieval, private key, public
        key cryptography}, 
  isbn = {3-540-22217-0}, 
  issn = {0302-9743}, 
  url = {http://cat.inist.fr/?aModele=afficheN\&cpsidt=15852065}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kissner04private.pdf}, 
}
knight2012autonetkit
@article{knight2012autonetkit,
  title = {AutoNetkit: simplifying large scale, open-source network experimentation}, 
  author = {Knight, Simon and Jaboldinov, Askar and Maennel, Olaf and Phillips, Iain and
        Roughan, Matthew}, 
  journal = {SIGCOMM Comput. Commun. Rev.}, 
  volume = {42}, 
  number = {4}, 
  year = {2012}, 
  address = {New York, NY, USA}, 
  pages = {97--98}, 
  publisher = {ACM}, 
  www_section = {automated configuration, emulation, Network management}, 
  issn = {0146-4833}, 
  doi = {10.1145/2377677.2377699}, 
  url = {http://doi.acm.org/10.1145/2377677.2377699}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/autonetkit-small.pdf}, 
}
koepsell:wpes2004
@conference{koepsell:wpes2004,
  title = {How to Achieve Blocking Resistance for Existing Systems Enabling Anonymous Web
        Surfing}, 
  author = {Stefan K{\"o}psell and Ulf Hilling}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2004)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2004}, 
  month = {October}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {We are developing a blocking resistant, practical and usable system for
        anonymous web surfing. This means, the system tries to provide as much
        reachability and availability as possible, even to users in countries where the
        free flow of information is legally, organizationally and physically restricted.
        The proposed solution is an add-on to existing anonymity systems. First we give a
        classification of blocking criteria and some general countermeasures. Using these
        techniques, we outline a concrete design, which is based on the JAP-Web Mixes
        (aka AN.ON)}, 
  www_section = {blocking resistance, JAP, mix}, 
  isbn = {1-58113-968-3}, 
  doi = {10.1145/1029179.1029197}, 
  url = {http://portal.acm.org/citation.cfm?id=1029179.1029197}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/koepsell-wpes2004_0.pdf},
}
kostas-thesis
@mastersthesis{kostas-thesis,
  title = {Probabilistic and Information-Theoretic Approaches to Anonymity}, 
  author = {Konstantinos Chatzikokolakis}, 
  school = {Laboratoire d'Informatique (LIX), {\'E}cole Polytechnique, Paris}, 
  year = {2007}, 
  month = {October}, 
  type = {PhD}, 
  abstract = {As the number of Internet activities increases, there is a growing amount of
        personal information about the users that is transferred using public electronic
        means, making it feasible to collect a huge amount of information about a person.
        As a consequence, the need for mechanisms to protect such information is
        compelling. In this thesis, we study security protocols with an emphasis on the
        property of anonymity and we propose methods to express and verify this property.
        Anonymity protocols often use randomization to introduce noise, thus limiting the
        inference power of a malicious observer. We consider a probabilistic framework in
        which a protocol is described by its set of anonymous information, observable
        information and the conditional probability of observing the latter given the
        former. In this framework we express two anonymity properties, namely strong
        anonymity and probable innocence. Then we aim at quantitative definitions of
        anonymity. We view protocols as noisy channels in the information-theoretic sense
        and we express their degree of anonymity as the converse of channel capacity. We
        apply this definition to two known anonymity protocols. We develop a monotonicity
        principle for the capacity, and use it to show a number of results for binary
        channels in the context of algebraic information theory. We then study the
        probability of error for the attacker in the context of Bayesian inference,
        showing that it is a piecewise linear function and using this fact to improve
        known bounds from the literature. Finally we study a problem that arises when we
        combine probabilities with nondeterminism, where the scheduler is too powerful
        even for trivially secure protocols. We propose a process calculus which allows
        to express restrictions to the scheduler, and we use it in the analysis of an
        anonymity and a contract-signing protocol}, 
  url = {http://www.win.tue.nl/~kostas/these/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kostas-thesis.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
kutzner03connecting
@conference{kutzner03connecting,
  title = {Connecting Vehicle Scatternets by Internet-Connected Gateways}, 
  author = {Kendy Kutzner and Jean-Jacques Tchouto and Marc Bechler and Lars Wolf and Bernd
        Bochow and Thomas Luckenbach}, 
  booktitle = {Workshop on Multiradio Multimedia Communications MMC 2003}, 
  year = {2003}, 
  address = {University of Dortmund, Germany}, 
  type = {publication}, 
  abstract = {This paper presents an approach for interconnecting isolated clouds of an ad
        hoc network that form a scatternet topology using Internet gateways as
        intermediate nodes. The architecture developed is intended to augment FleetNet, a
        highly dynamic ad hoc network for inter-vehicle communications. This is achieved
        by upgrading FleetNet capabilities to establish a communication path between
        moving vehicles and the Internet via Internet gateways to facilitate direct
        gateway to gateway communications via the Internet, thus bridging gaps in the
        network topology and relaying packets closer towards their geographical
        destination at the same time. After outlining the overall FleetNet approach and
        its underlying geographical multi-hop routing, we focus on the FleetNet gateway
        architecture. We describe required modifications to the gateway architecture and
        to the FleetNet network layer in order to use these gateways as intermediate
        nodes for FleetNet routing. Finally, we conclude the paper by a short discussion
        on the prototype gateway implementation and by summarizing first results and
        ongoing work on inter scatternet communication}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner03connecting.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
kutzner05autonomic
@conference{kutzner05autonomic,
  title = {Towards Autonomic Networking using Overlay Routing Techniques}, 
  author = {Kendy Kutzner and Thomas Fuhrmann}, 
  booktitle = {Proceedings of the 18th International Conference on Architecture of
        Computing Systems (ARCS '05)--System Aspects in Organic and Pervasive Computing}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  address = {Innsbruck, Austria}, 
  pages = {222--235}, 
  publisher = {Springer Berlin / Heidelberg}, 
  type = {publication}, 
  abstract = {With an ever-growing number of computers being embedded into our
        surroundings, the era of ubiquitous computing is approaching fast. However, as
        the number of networked devices increases, so does system complexity. Contrary to
        the goal of achieving an invisible computer, the required amount of management
        and human intervention increases more and more, both slowing down the growth rate
        and limiting the achievable size of ubiquitous systems. In this paper we present
        a novel routing approach that is capable of handling complex networks without any
        administrative intervention. Based on a combination of standard overlay routing
        techniques and source routes, this approach is capable of efficiently
        bootstrapping a routable network. Unlike other approaches that try to combine
        peer-to-peer ideas with ad-hoc networks, sensor networks, or ubiquitous systems,
        our approach is not based on a routing scheme. This makes the resulting system
        flexible and powerful with respect at application support as well as efficient
        with regard to routing overhead and system complexity}, 
  www_section = {autonomous systems, overlay networks, P2P}, 
  isbn = {978-3-540-25273-3}, 
  doi = {10.1007/b106632}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05autonomic.pdf},
}
kutzner05dvdr
@conference{kutzner05dvdr,
  title = {A Self-Organizing Job Scheduling Algorithm for a Distributed VDR}, 
  author = {Kendy Kutzner and Cramer, Curt and Thomas Fuhrmann}, 
  booktitle = {Workshop "Peer-to-Peer-Systeme und -Anwendungen", 14. Fachtagung
        Kommunikation in Verteilten Systemen (KiVS 2005)}, 
  year = {2005}, 
  address = {Kaiserslautern, Germany}, 
  type = {publication}, 
  abstract = {In [CKF04], we have reported on our concept of a peer-to-peer extension to
        the popular video disk recorder (VDR) [Sch04], the Distributed Video Disk
        Recording (DVDR) system. The DVDR is a collaboration system of existing video
        disk recorders via a peer to peer network. There, the VDRs communicate about the
        tasks to be done and distribute the recordings afterwards. In this paper, we
        report on lessons learnt during its implementation and explain the considerations
        leading to the design of a new job scheduling algorithm. DVDR is an application
        which is based on a distributed hash table (DHT) employing proximity route
        selection (PRS)/proximity neighbor selection (PNS). For our implementation, we
        chose to use Chord [SMK + 01, GGG + 03]. Using a DHT with PRS/PNS yields two
        important features: (1) Each hashed key is routed to exactly one destination node
        within the system. (2) PRS/PNS forces messages originating in one region of the
        network destined to the same key to be routed through exactly one node in that
        region (route convergence). The first property enables per-key aggregation trees
        with a tree being rooted at the node which is responsible for the respective key.
        This node serves as a rendezvous point. The second property leads to locality
        (i.e., low latency) in this aggregation tree}, 
  www_section = {Chord, distributed hash table, proximity neighbor selection}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05dvdr.pdf}, 
}
kutzner05overnet
@conference{kutzner05overnet,
  title = {Measuring Large Overlay Networks--The Overnet Example}, 
  author = {Kendy Kutzner and Thomas Fuhrmann}, 
  booktitle = {Konferenzband der 14. Fachtagung Kommunikation in Verteilten Systemen (KiVS
        2005)}, 
  year = {2005}, 
  address = {Kaiserslautern, Germany}, 
  type = {publication}, 
  abstract = {Peer-to-peer overlay networks have grown significantly in size and
        sophistication over the last years. Meanwhile, distributed hash tables (DHT)
        provide efficient means to create global scale overlay networks on top of which
        various applications can be built. Although filesharing still is the most
        prominent example, other applications are well conceivable. In order to
        rationally design such applications, it is important to know (and understand) the
        properties of the overlay networks as seen from the respective application. This
        paper reports the results from a two week measurement of the entire Overnet
        network, the currently most widely deployed DHT-based overlay. We describe both,
        the design choices that made that measurement feasible and the results from the
        measurement itself. Besides the basic determination of network size, node
        availability and node distribution, we found unexpected results for the overlay
        latency distribution}, 
  www_section = {distributed hash table, overlay networks, P2P}, 
  isbn = {978-3-540-24473-8}, 
  doi = {10.1007/b138861}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner05overnet.pdf}, 
}
kutzner06igor
@conference{kutzner06igor,
  title = {The IGOR File System for Efficient Data Distribution in the GRID}, 
  author = {Kendy Kutzner and Thomas Fuhrmann}, 
  booktitle = {Proceedings of the Cracow Grid Workshop CGW 2006}, 
  year = {2006}, 
  address = {Cracow, Poland}, 
  abstract = {Many GRID applications such as drug discovery in the pharmaceutical industry
        or simulations in meteorology and generally in the earth sciences rely on large
        data bases. Historically, these data bases are flat files on the order of several
        hundred megabytes each. Today, sites often need to download dozens or hundreds of
        such files before they can start a simulation or analysis run, even if the
        respective application accesses only small fractions of the respective files. The
        IGOR file system (which has been developed within the EU FP6 SIMDAT project),
        addresses the need for an easy and efficient way to access large files across the
        Internet. IGOR-FS is especially suited for (potentially globally) distributed
        sites that read or modify only small portions of the files. IGOR-FS provides fine
        grained versioning and backup capabilities; and it is built on strong
        cryptography to protect confidential data both in the network and on the local
        sites storage systems}, 
  www_section = {file systems, GRID}, 
  isbn = {83-915141-7-X}, 
  affiliation = {University of Karlsruhe, Germany}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.68.1091}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner06igor.pdf}, 
}
kutzner06securessr
@conference{kutzner06securessr,
  title = {Securing the Scalable Source Routing Protocol}, 
  author = {Kendy Kutzner and Christian Wallenta and Thomas Fuhrmann}, 
  booktitle = {Proceedings of the World Telecommunications Congress 2006}, 
  year = {2006}, 
  address = {Budapest, Hungary}, 
  type = {publication}, 
  abstract = {The Scalable Source Routing (SSR) protocol combines overlay-like routing in a
        virtual network structure with source routing in the physical network to a single
        cross-layer architecture. Thereby, it can provide indirect routing in networks
        that lack a well-crafted structure. SSR is well suited for mobile ad hoc
        networks, sensor-actuator networks, and especially for mesh networks. Moreover,
        SSR directly provides the routing semantics of a structured routing overlay,
        making it an efficient basis for the scalable implementation of fully
        decentralized applications. In this paper we analyze SSR with regard to security:
        We show where SSR is prone to attacks, and we describe protocol modifications
        that make SSR robust in the presence of malicious nodes. The core idea is to
        introduce cryptographic certificates that allow nodes to discover forged protocol
        messages. We evaluate our proposed modifications by means of simulations, and
        thus demonstrate that they are both effective and efficient}, 
  www_section = {cryptography, scalable source routing, sensor networks}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner06securessr.pdf},
}
kutzner07linearization
@conference{kutzner07linearization,
  title = {Using Linearization for Global Consistency in SSR}, 
  author = {Kendy Kutzner and Thomas Fuhrmann}, 
  booktitle = {Proceedings of the 4th Int. IEEE Workshop on Hot Topics in P2P Systems}, 
  year = {2007}, 
  address = {Long Beach, CA}, 
  type = {publication}, 
  abstract = {Novel routing algorithms such as scalable source routing (SSR) and virtual
        ring routing (VRR) need to set up and maintain a virtual ring structure among all
        the nodes in the network. The iterative successor pointer rewiring protocol
        (ISPRP) is one way to bootstrap such a network. Like its VRR-analogon, ISPRP
        requires one of the nodes to flood the network to guarantee consistency. Recent
        results on self-stabilizing algorithms now suggest a new approach to bootstrap
        the virtual rings of SSR and VRR. This so-called linearization method does not
        require any flooding at all. Moreover, it has been shown that linearization with
        shortcut neighbors has on average polylogarithmic convergence time, only}, 
  www_section = {scalable source routing}, 
  url = {http://i30www.ira.uka.de/research/publications/p2p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/kutzner07linearization.pdf},
}
langos02
@conference{langos02,
  title = {Dummy Traffic Against Long Term Intersection Attacks}, 
  author = {Oliver Berthold and Heinrich Langos}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2002)}, 
  organization = {Springer-Verlag, LNCS 2482}, 
  year = {2002}, 
  month = {April}, 
  editor = {Roger Dingledine and Paul Syverson}, 
  publisher = {Springer-Verlag, LNCS 2482}, 
  abstract = {In this paper we propose a method to prevent so called
        {\textquotedblleft}intersection attacks{\textquotedblright} on anonymity
        services. Intersection attacks are possible if not all users of such a service
        are active all the time and part of the transfered messages are linkable.
        Especially in real systems, the group of users (anonymity set) will change over
        time due to online and off-line periods. Our proposed solution is to send
        pregenerated dummy messages to the communication partner (e.g. the web server),
        during the user's off-line periods. For a detailed description of our method we
        assume a cascade of Chaumian MIXes as anonymity service and respect and fulfill
        the MIX attacker model}, 
  www_section = {anonymity service, intersection attacks}, 
  isbn = {978-3-540-00565-0}, 
  doi = {10.1007/3-540-36467-6}, 
  url = {http://www.springerlink.com/content/66ybualwu5hmh563/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/langos02.pdf}, 
}
le2005
@article{le2005,
  title = {Reading File Metadata with extract and libextractor}, 
  author = {Christian Grothoff}, 
  journal = {Linux Journal}, 
  volume = {6-2005}, 
  year = {2005}, 
  month = {June}, 
  publisher = {SCC}, 
  www_section = {GNUnet, keywords, libextractor, metadata, search}, 
  url = {http://www.linuxjournal.com/article/7552}, 
}
limits-open
@conference{limits-open,
  title = {Limits of Anonymity in Open Environments}, 
  author = {Dogan Kesdogan and Dakshi Agrawal and Stefan Penz}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2002)}, 
  organization = {Springer-Verlag, LNCS 2578}, 
  year = {2002}, 
  month = {October}, 
  editor = {Fabien Petitcolas}, 
  publisher = {Springer-Verlag, LNCS 2578}, 
  abstract = {A user is only anonymous within a set of other users. Hence, the core
        functionality of an anonymity providing technique is to establish an anonymity
        set. In open environments, such as the Internet, the established anonymity sets
        in the whole are observable and change with every anonymous communication. We use
        this fact of changing anonymity sets and present a model where we can determine
        the protection limit of an anonymity technique, i.e. the number of observations
        required for an attacker to break uniquely a given anonymity technique. In this
        paper, we use the popular MIX method to demonstrate our attack. The MIX method
        forms the basis of most of the today's deployments of anonymity services (e.g.
        Freedom, Onion Routing, Webmix). We note that our approach is general and can be
        applied equally well to other anonymity providing techniques}, 
  www_section = {anonymity measurement, attack, mix}, 
  url = {http://portal.acm.org/citation.cfm?id=731881}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/limits-open.pdf}, 
}
loesing2008performance
@conference{loesing2008performance,
  title = {Performance Measurements and Statistics of Tor Hidden Services}, 
  author = {Karsten Loesing and Werner Sandmann and Christian Wilms and Guido Wirtz}, 
  booktitle = {Proceedings of the 2008 International Symposium on Applications and the
        Internet (SAINT)}, 
  organization = {IEEE CS Press}, 
  year = {2008}, 
  month = {July}, 
  address = {Turku, Finland}, 
  publisher = {IEEE CS Press}, 
  abstract = {Tor (The Onion Routing) provides a secure mechanism for offering TCP-based
        services while concealing the hidden server's IP address. In general the
        acceptance of services strongly relies on its QoS properties. For potential Tor
        users, provided the anonymity is secured, probably the most important QoS
        parameter is the time until they finally get response by such a hidden service.
        Internally, overall response times are constituted by several steps invisible for
        the user. We provide comprehensive measurements of all relevant latencies and a
        detailed statistical analysis with special focus on the overall response times.
        Thereby, we gain valuable insights that enable us to give certain statistical
        assertions and to suggest improvements in the hidden service protocol and its
        implementation}, 
  www_section = {anonymity, performance, privacy, statistical analysis}, 
  isbn = {978-0-7695-3297-4}, 
  doi = {10.1109/SAINT.2008.69}, 
  url = {http://portal.acm.org/citation.cfm?id=1441426.1441996}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/loesing2008performance.pdf},
}
lurchi-2017
@mastersthesis{lurchi-2017,
  title = {Improving Voice over GNUnet}, 
  author = {Christian Ulrich}, 
  school = {TU Berlin}, 
  volume = {Bachelor}, 
  year = {2017}, 
  month = {July}, 
  address = {Berlin}, 
  pages = {0--48}, 
  type = {B.S}, 
  abstract = {In contrast to ubiquitous cloud-based solutions the telephony application
        GNUnet conversation provides fully-decentralized, secure voice communication and
        thus impedes mass surveillance. The aim of this thesis is to investigate why
        GNUnet conversation currently provides poor Quality of Experience under typical
        wide area network conditions and to propose optimization measures. After network
        shaping and the initialization of two isolated GNUnet peers had been automated,
        delay measurements were done. With emulated network characteristics network
        delay, cryptography delays and audio codec delays were measured and transmitted
        speech was recorded. An analysis of the measurement results and a subjective
        assessment of the speech recordings revealed that extreme outliers occur in most
        scenarios and impair QoE. Moreover it was shown that GNUnet conversation
        introduces a large delay that confines the environment in which good QoE is
        possible. In the measurement environment at least 23 ms always occurred, of which
        large parts were caused by cryptography. It was shown that optimization in
        the cryptography part and other components are possible. Finally the conditions
        for currently reaching good QoE were determined and ideas for further
        investigations were presented}, 
  www_section = {CADET, GNUnet, measurement, performance}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/lurchi-bs-thesis.pdf}, 
}
ma_dold_consensus_21dec2015byzantine
@mastersthesis{ma_dold_consensus_21dec2015byzantine,
  title = {Byzantine Fault Tolerant Set Consensus with Efficient Set Reconciliation}, 
  author = {Florian Dold}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {M.S}, 
  year = {2015}, 
  month = {December}, 
  address = {M{\"u}nchen}, 
  pages = {0--69}, 
  type = {Master}, 
  abstract = {Byzantine consensus is a fundamental and well-studied problem in the area of
        distributed system. It requires a group of peers to reach agreement on some
        value, even if a fraction of the peers is controlled by an adversary. This thesis
        proposes set union consensus, an efficient generalization of Byzantine consensus
        from single elements to sets. This is practically motivated by Secure Multiparty
        Computation protocols such as electronic voting, where a large set of elements
        must be collected and agreed upon. Existing practical implementations of
        Byzantine consensus are typically based on state machine replication and not
        well-suited for agreement on sets, since they must process individual agreements
        on all set elements in sequence. We describe and evaluate our implementation of
        set union consensus in GNUnet, which is based on a composition of Eppstein's set
        reconciliation protocol with the simple gradecast consensus protocol described by
        Ben-Or}, 
  www_section = {byzantine consensus, GNUnet, secure multiparty computation, set
        reconciliation, voting}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ma_dold_consensus_21dec2015.pdf},
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
ma_kirsch_2014_0
@mastersthesis{ma_kirsch_2014_0,
  title = {Improved Kernel-Based Port-Knocking in Linux}, 
  author = {Julian Kirsch}, 
  volume = {M.S}, 
  year = {2014}, 
  month = {August}, 
  type = {Master's}, 
  abstract = {Port scanning is used to discover vulnerable services and launch attacks
        against network infrastructure. Port knocking is a well-known technique to hide
        TCP servers from port scanners. This thesis presents the design of TCP Stealth, a
        socket option to realize new port knocking variant with improved security and
        usability compared to previous designs. TCP Stealth replaces the traditional
        random TCP SQN number with a token that authenticates the client and (optionally)
        the first bytes of the TCP payload. Clients and servers can enable TCP Stealth by
        explicitly setting a socket option or linking against a library that wraps
        existing network system calls. This thesis also describes Knock, a free software
        implementation of TCP Stealth for the Linux kernel and {\tt libknockify}, a
        shared library that wraps network system calls to activate Knock on GNU/Linux
        systems, allowing administrators to deploy Knock without recompilation. Finally,
        we present experimental results demonstrating that TCP Stealth is compatible with
        most existing middleboxes on the Internet}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ma_kirsch_2014_0.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
maymounkov:rateless
@conference{maymounkov:rateless,
  title = {Rateless Codes and Big Downloads}, 
  author = {Petar Maymounkov and David Mazi{\`e}res}, 
  booktitle = {IPTPS'03--Proceedings of the 2nd International Workshop on Peer-to-Peer
        Systems}, 
  organization = {Springer}, 
  volume = {2735}, 
  year = {2003}, 
  month = {February}, 
  address = {Berkeley, CA, USA}, 
  pages = {247--255}, 
  publisher = {Springer}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {This paper presents a novel algorithm for downloading big files from multiple
        sources in peer-to-peer networks. The algorithm is simple, but offers several
        compelling properties. It ensures low hand-shaking overhead between peers that
        download files (or parts of files) from each other. It is computationally
        efficient, with cost linear in the amount of data transfered. Most importantly,
        when nodes leave the network in the middle of uploads, the algorithm minimizes
        the duplicate information shared by nodes with truncated downloads. Thus, any two
        peers with partial knowledge of a given file can almost always fully benefit from
        each other's knowledge. Our algorithm is made possible by the recent introduction
        of linear-time, rateless erasure codes}, 
  www_section = {algorithms, big files, download, multiple sources, rateless code}, 
  doi = {10.1007/978-3-540-45172-3_23}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IPTPS\%2703\%20-\%20Rateless\%20codes\%20and\%20big\%20downloads.pdf},
}
mcb-en2015
@article{mcb-en2015,
  title = {NSA's MORECOWBELL: Knell for DNS}, 
  author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}, 
  journal = {unknown}, 
  institution = {GNUnet e.V}, 
  year = {2015}, 
  month = {January}, 
  address = {M{\"u}nchen}, 
  www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN, TLS}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-en.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
mcb-es2015
@article{mcb-es2015,
  title = {El programa MORECOWBELL de la NSA: Doblan las campanas para el DNS}, 
  author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum}, 
  journal = {unknown}, 
  institution = {GNUnet e.V}, 
  year = {2015}, 
  month = {January}, 
  address = {M{\"u}nchen}, 
  www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-es.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
mcb-fr2015
@article{mcb-fr2015,
  title = {Le programme MORECOWBELL de la NSA: Sonne le glas du DNS}, 
  author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum and
        Ludovic Court{\`e}s}, 
  journal = {unknown}, 
  institution = {GNUnet e.V}, 
  year = {2015}, 
  month = {January}, 
  address = {M{\"u}nchen}, 
  www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-fr.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
mcb-it2015
@article{mcb-it2015,
  title = {Il programma MORECOWBELL della NSA: Campane a morto per il DNS}, 
  author = {Christian Grothoff and Matthias Wachs and Monika Ermert and Jacob Appelbaum and
        Luca Saiu}, 
  journal = {unknown}, 
  institution = {GNUnet e.V}, 
  year = {2015}, 
  month = {January}, 
  address = {M{\"u}nchen}, 
  www_section = {DNS, DNSSEC, MORECOWBELL, NAMECOIN}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mcb-it.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
mccoy-pet2008
@conference{mccoy-pet2008,
  title = {Shining Light in Dark Places: Understanding the Tor Network}, 
  author = {Damon McCoy and Kevin Bauer and Dirk Grunwald and Tadayoshi Kohno and Douglas
        Sicker}, 
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)}, 
  organization = {Springer}, 
  year = {2008}, 
  month = {July}, 
  address = {Leuven, Belgium}, 
  pages = {63--76}, 
  editor = {Borisov, Nikita and Ian Goldberg}, 
  publisher = {Springer}, 
  abstract = {To date, there has yet to be a study that characterizes the usage of a real
        deployed anonymity service. We present observations and analysis obtained by
        participating in the Tor network. Our primary goals are to better understand Tor
        as it is deployed and through this understanding, propose improvements. In
        particular, we are interested in answering the following questions: (1) How is
        Tor being used? (2) How is Tor being mis-used? (3) Who is using Tor? To sample
        the results, we show that web traffic makes up the majority of the connections
        and bandwidth, but non-interactive protocols consume a disproportionately large
        amount of bandwidth when compared to interactive protocols. We provide a survey
        of how Tor is being misused, both by clients and by Tor router operators. In
        particular, we develop a method for detecting exit router logging (in certain
        cases). Finally, we present evidence that Tor is used throughout the world, but
        router participation is limited to only a few countries}, 
  www_section = {anonymity, Tor}, 
  isbn = {978-3-540-70629-8}, 
  doi = {10.1007/978-3-540-70630-4_5}, 
  url = {http://portal.acm.org/citation.cfm?id=1428264}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mccoy-pet2008.pdf}, 
}
mitkuro
@conference{mitkuro,
  title = {Attack for Flash MIX}, 
  author = {Masashi Mitomo and Kaoru Kurosawa}, 
  booktitle = {Proceedings of ASIACRYPT 2000}, 
  organization = {Springer-Verlag, LNCS 1976}, 
  year = {2000}, 
  publisher = {Springer-Verlag, LNCS 1976}, 
  abstract = {A MIX net takes a list of ciphertexts ($c_1, \ldots, c_N$) and outputs a permuted
        list of the plaintexts ($m_1, \ldots, m_N$) without revealing the relationship between
        ($c_1, \ldots, c_N$) and ($m_1, \ldots, m_N$). This paper shows that Jakobsson's flash
        MIX of PODC'99, which was believed to be the most efficient robust MIX net, is
        broken. The first MIX server can prevent computing the correct output with
        probability 1 in our attack. We also present a countermeasure for our attack}, 
  isbn = {3-540-41404-5}, 
  doi = {10.1007/3-540-44448-3_15}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.20.6972}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.20.6972.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
mix-acc
@conference{mix-acc,
  title = {A Reputation System to Increase MIX-net Reliability}, 
  author = {Roger Dingledine and Michael J. Freedman and David Hopwood and David Molnar}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2001)}, 
  organization = {Springer-Verlag, LNCS 2137}, 
  year = {2001}, 
  month = {April}, 
  pages = {126--141}, 
  editor = {Ira S. Moskowitz}, 
  publisher = {Springer-Verlag, LNCS 2137}, 
  abstract = {We describe a design for a reputation system that increases the reliability
        and thus efficiency of remailer services. Our reputation system uses a MIX-net in
        which MIXes give receipts for intermediate messages. Together with a set of
        witnesses, these receipts allow senders to verify the correctness of each MIX and
        prove misbehavior to the witnesses}, 
  www_section = {remailer}, 
  isbn = {978-3-540-42733-9}, 
  doi = {10.1007/3-540-45496-9}, 
  url = {http://www.springerlink.com/content/ej8qv86wdkeukjc5/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mix-acc.pdf}, 
}
mixmaster-reliable
@conference{mixmaster-reliable,
  title = {Comparison between two practical mix designs}, 
  author = {Claudia Diaz and Len Sassaman and Evelyne Dewitte}, 
  booktitle = {Proceedings of ESORICS 2004}, 
  year = {2004}, 
  month = {September}, 
  address = {France}, 
  series = {LNCS}, 
  abstract = {We evaluate the anonymity provided by two popular email mix implementations,
        Mixmaster and Reliable, and compare their effectiveness through the use of
        simulations which model the algorithms used by these mixing applications. Our
        simulations are based on actual traffic data obtained from a public anonymous
        remailer (mix node). We determine that assumptions made in previous literature
        about the distribution of mix input traffic are incorrect: in particular, the
        input traffic does not follow a Poisson distribution. We establish for the first
        time that a lower bound exists on the anonymity of Mixmaster, and discover that
        under certain circumstances the algorithm used by Reliable provides no anonymity.
        We find that the upper bound on anonymity provided by Mixmaster is slightly
        higher than that provided by Reliable. We identify flaws in the software in
        Reliable that further compromise its ability to provide anonymity, and review key
        areas that are necessary for the security of a mix in addition to a sound
        algorithm. Our analysis can be used to evaluate under which circumstances the two
        mixing algorithms should be used to best achieve anonymity and satisfy their
        purpose. Our work can also be used as a framework for establishing a security
        review process for mix node deployments}, 
  isbn = {978-3-540-22987-2}, 
  doi = {10.1007/b100085}, 
  url = {http://www.springerlink.com/content/7lvqwn445ty1c7ga/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/mixmaster-reliable.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
mixmaster-spec
@booklet{mixmaster-spec,
  title = {Mixmaster Protocol --- Version 2}, 
  author = {Ulf M{\"o}ller and Lance Cottrell and Peter Palfrader and Len Sassaman}, 
  year = {2003}, 
  month = {July}, 
  abstract = {Most e-mail security protocols only protect the message body, leaving useful
        information such as the identities of the conversing parties, sizes of
        messages and frequency of message exchange open to adversaries. This document
        describes Mixmaster (version 2), a mail transfer protocol designed to protect
        electronic mail against traffic analysis. Mixmaster is based on D. Chaum's
        mix-net protocol. A mix (remailer) is a service that forwards messages, using
        public key cryptography to hide the correlation between its inputs and outputs.
        Sending messages through sequences of remailers achieves anonymity and
        unobservability of communications against a powerful adversary}, 
  www_section = {electronic mail, public key cryptography, traffic analysis}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/freehaven.net-anonbib-cache-mixmaster-spec.txt.pdf},
}
mmsec04-Klonowski
@conference{mmsec04-Klonowski,
  title = {DUO--Onions and Hydra--Onions -- Failure and Adversary Resistant Onion
        Protocols}, 
  author = {Jan Iwanik and Marek Klonowski and Miroslaw Kutylowski}, 
  booktitle = {Proceedings of the IFIP TC-6 TC-11 Conference on Communications and
        Multimedia Security 2004}, 
  organization = {Springer Boston}, 
  year = {2004}, 
  month = {September}, 
  publisher = {Springer Boston}, 
  abstract = {A serious weakness of the onion protocol, one of the major tools for
        anonymous communication, is its vulnerability to network failures and/or an
        adversary trying to break the communication. This is facilitated by the fact that
        each message is sent through a path of a certain length and a failure in a single
        point of this path prohibits message delivery. Since the path cannot be too short
        in order to offer anonymity protection (at least logarithmic in the number of
        nodes), the failure probability might be quite substantial. The simplest solution
        to this problem would be to send many onions with the same message. We show that
        this approach can be optimized with respect to communication overhead and
        resilience to failures and/or adversary attacks. We propose two protocols: the
        first one mimics K independent onions with a single onion. The second protocol is
        designed for the case where an adaptive adversary may destroy communication going
        out of servers chosen according to the traffic observed by him. In this case a
        single message flows in a stream of K onions {\textemdash} the main point is that
        even when the adversary kills some of these onions, the stream quickly recovers
        to the original bandwidth {\textemdash} again K onions with this message would
        flow through the network}, 
  www_section = {adaptive adversary, anonymity, onion routing}, 
  isbn = {978-0-387-24485-3}, 
  doi = {10.1007/b105674}, 
  url = {http://www.springerlink.com/content/019lu6xp5b9fctn8/}, 
}
modular-approach
@article{modular-approach,
  title = {Information Hiding, Anonymity and Privacy: A Modular Approach}, 
  author = {Dominic Hughes and Vitaly Shmatikov}, 
  journal = {Journal of Computer Security}, 
  volume = {12}, 
  number = {1}, 
  year = {2004}, 
  pages = {3--36}, 
  abstract = {We propose a new specification framework for information hiding properties
        such as anonymity and privacy. The framework is based on the concept of a
        function view, which is a concise representation of the attacker's partial
        knowledge about a function. We describe system behavior as a set of functions,
        and formalize different information hiding properties in terms of views of these
        functions. We present an extensive case study, in which we use the function view
        framework to systematically classify and rigorously define a rich domain of
        identity-related properties, and to demonstrate that privacy and anonymity are
        independent. The key feature of our approach is its modularity. It yields
        precise, formal specifications of information hiding properties for any protocol
        formalism and any choice of the attacker model as long as the latter induce an
        observational equivalence relation on protocol instances. In particular,
        specifications based on function views are suitable for any cryptographic process
        calculus that defines some form of indistinguishability between processes. Our
        definitions of information hiding properties take into account any feature of the
        security model, including probabilities, random number generation, timing, etc.,
        to the extent that it is accounted for by the formalism in which the system is
        specified}, 
  www_section = {anonymity, information hiding, privacy}, 
  issn = {0926-227X}, 
  url = {http://portal.acm.org/citation.cfm?id=1297694}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shmat_anon.pdf}, 
}
moin:tel-00724121
@mastersthesis{moin:tel-00724121,
  title = {Recommendation and Visualization Techniques for Large Scale Data}, 
  author = {Moin, Afshin}, 
  school = {Universit{\'e} Rennes 1}, 
  year = {2012}, 
  month = {July}, 
  type = {phd}, 
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
morales2014cryogenic
@mastersthesis{morales2014cryogenic,
  title = {Cryogenic: Enabling Power-Aware Applications on Linux}, 
  author = {Alejandra Morales}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {M. Sc}, 
  year = {2014}, 
  month = {February}, 
  address = {Garching bei M{\"u}nchen}, 
  pages = {0--106}, 
  type = {Masters}, 
  abstract = {As a means of reducing power consumption, hardware devices are capable to
        enter into sleep-states that have low power consumption. Waking up from those
        states in order to return to work is typically a rather energy-intensive
        activity. Some existing applications have non-urgent tasks that currently force
        hardware to wake up needlessly or prevent it from going to sleep. It would be
        better if such non-urgent activities could be scheduled to execute when the
        respective devices are active to maximize the duration of sleep-states. This
        requires cooperation between applications and the kernel in order to determine
        when the execution of a task will not be expensive in terms of power consumption.
        This work presents the design and implementation of Cryogenic, a POSIX-compatible
        API that enables clustering tasks based on the hardware activity state.
        Specifically, Cryogenic's API allows applications to defer their execution until
        other tasks use the device they want to use. As a result, two actions that
        contribute to reduce the device energy consumption are achieved: reduce the
        number of hardware wake-ups and maximize the idle periods. The energy
        measurements enacted at the end of this thesis demonstrate that, for the specific
        setup and conditions present during our experimentation, Cryogenic is capable to
        achieve savings between 1\% and 10\% for a USB WiFi device. Although we ideally
        target mobile platforms, Cryogenic has been developed by means a new Linux module
        that integrates with the existing POSIX event loop system calls. This allows to
        use Cryogenic on many different platforms as long as they use a GNU/Linux
        distribution as the main operating system. An evidence of this can be found in
        this thesis, where we demonstrate the power savings on a single-board computer}, 
  www_section = {cooperative, cryogenic, GNUnet, Linux, POSIX, power}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morales2014cryogenic.pdf},
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}
morphing09
@conference{morphing09,
  title = {Traffic Morphing: An efficient defense against statistical traffic analysis}, 
  author = {Charles Wright and Scott Coull and Fabian Monrose}, 
  booktitle = {Proceedings of the Network and Distributed Security Symposium--{NDSS} '09}, 
  organization = {IEEE}, 
  year = {2009}, 
  month = {February}, 
  publisher = {IEEE}, 
  abstract = {Recent work has shown that properties of network traffic that remain
        observable after encryption, namely packet sizes and timing, can reveal
        surprising information about the traffic's contents (e.g., the language of a VoIP
        call [29], passwords in secure shell logins [20], or even web browsing habits
        [21, 14]). While there are some legitimate uses for encrypted traffic analysis,
        these techniques also raise important questions about the privacy of encrypted
        communications. A common tactic for mitigating such threats is to pad packets to
        uniform sizes or to send packets at fixed timing intervals; however, this
        approach is often inefficient. In this paper, we propose a novel method for
        thwarting statistical traffic analysis algorithms by optimally morphing one class
        of traffic to look like another class. Through the use of convex optimization
        techniques, we show how to optimally modify packets in real-time to reduce the
        accuracy of a variety of traffic classifiers while incurring much less overhead
        than padding. Our evaluation of this technique against two published traffic
        classifiers for VoIP [29] and web traffic [14] shows that morphing works well on
        a wide range of network data{\textemdash}in some cases, simultaneously providing
        better privacy and lower overhead than na{\"\i}ve defenses}, 
  www_section = {privacy, traffic analysis, VoIP}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphing09.pdf}, 
}
morphmix-fc2004
@conference{morphmix-fc2004,
  title = {Practical Anonymity for the Masses with MorphMix}, 
  author = {Marc Rennhard and Bernhard Plattner}, 
  booktitle = {Proceedings of Financial Cryptography (FC '04)}, 
  organization = {Springer-Verlag, LNCS 3110}, 
  year = {2004}, 
  month = {February}, 
  pages = {233--250}, 
  editor = {Ari Juels}, 
  publisher = {Springer-Verlag, LNCS 3110}, 
  abstract = {MorphMix is a peer-to-peer circuit-based mix network to provide practical
        anonymous low-latency Internet access for millions of users. The basic ideas of
        MorphMix have been published before; this paper focuses on solving open problems
        and giving an analysis of the resistance to attacks and the performance it offers
        assuming realistic scenarios with very many users. We demonstrate that MorphMix
        scales very well and can support as many nodes as there are public IP addresses.
        In addition, we show that MorphMix is indeed practical because it provides good
        resistance from long-term profiling and offers acceptable performance despite the
        heterogeneity of the nodes and the fact that nodes can join or leave the system
        at any time}, 
  www_section = {anonymity, P2P}, 
  isbn = {978-3-540-22420-4}, 
  doi = {10.1007/b98935}, 
  url = {http://www.springerlink.com/content/dc1qn54t9ta4u3g1/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-fc2004.pdf}, 
}
morphmix:pet2006
@conference{morphmix:pet2006,
  title = {Breaking the Collusion Detection Mechanism of MorphMix}, 
  author = {Parisa Tabriz and Borisov, Nikita}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = {June}, 
  address = {Cambridge, UK}, 
  pages = {368--384}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {MorphMix is a peer-to-peer circuit-based mix network designed to provide
        low-latency anonymous communication. MorphMix nodes incrementally construct
        anonymous communication tunnels based on recommendations from other nodes in the
        system; this P2P approach allows it to scale to millions of users. However, by
        allowing unknown peers to aid in tunnel construction, MorphMix is vulnerable to
        colluding attackers that only offer other attacking nodes in their
        recommendations. To avoid building corrupt tunnels, MorphMix employs a collusion
        detection mechanism to identify this type of misbehavior. In this paper, we
        challenge the assumptions of the collusion detection mechanism and demonstrate
        that colluding adversaries can compromise a significant fraction of all anonymous
        tunnels, and in some cases, a majority of all tunnels built. Our results suggest
        that mechanisms based solely on a node's local knowledge of the network are not
        sufficient to solve the difficult problem of detecting colluding adversarial
        behavior in a P2P system and that more sophisticated schemes may be needed}, 
  www_section = {collusion detection, P2P}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/p2612108665331n7/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-pet2006.pdf}, 
}
morphmix:wpes2002
@conference{morphmix:wpes2002,
  title = {Introducing MorphMix: Peer-to-Peer based Anonymous Internet Usage with Collusion
        Detection}, 
  author = {Marc Rennhard and Bernhard Plattner}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2002)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2002}, 
  month = {November}, 
  address = {Washington, DC, USA}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Traditional mix-based systems are composed of a small set of static, well
        known, and highly reliable mixes. To resist traffic analysis attacks at a mix,
        cover traffic must be used, which results in significant bandwidth overhead.
        End-to-end traffic analysis attacks are even more difficult to counter because
        there are only a few entry-and exit-points in the system. Static mix networks
        also suffer from scalability problems and in several countries, institutions
        operating a mix could be targeted by legal attacks. In this paper, we introduce
        MorphMix, a system for peer-to-peer based anonymous Internet usage. Each MorphMix
        node is a mix and anyone can easily join the system. We believe that MorphMix
        overcomes or reduces several drawbacks of static mix networks. In particular, we
        argue that our approach offers good protection from traffic analysis attacks
        without employing cover traffic. But MorphMix also introduces new challenges. One
        is that an adversary can easily operate several malicious nodes in the system and
        try to break the anonymity of legitimate users by getting full control over their
        anonymous paths. To counter this attack, we have developed a collusion detection
        mechanism, which allows to identify compromised paths with high probability
        before they are being used}, 
  www_section = {collusion detection, legal attack, P2P, traffic analysis}, 
  isbn = {1-58113-633-1}, 
  doi = {10.1145/644527.644537}, 
  url = {http://portal.acm.org/citation.cfm?id=644537}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/morphmix-wpes2002.pdf}, 
}
moscibroda:on
@conference{moscibroda:on,
  title = {On Mechanism Design without Payments for Throughput Maximization}, 
  author = {Thomas Moscibroda and Stefan Schmid}, 
  booktitle = {INFOCOM'09. Proceedings of the 28th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2009}, 
  month = {April}, 
  address = {Rio de Janeiro, Brazil}, 
  pages = {972--980}, 
  publisher = {IEEE Computer Society}, 
  abstract = {It is well-known that the overall efficiency of a distributed system can
        suffer if the participating entities seek to maximize their individual
        performance. Consequently, mechanisms have been designed that force the
        participants to behave more cooperatively. Most of these game-theoretic solutions
        rely on payments between participants. Unfortunately, such payments are often
        cumbersome to implement in practice, especially in dynamic networks and where
        transaction costs are high. In this paper, we investigate the potential of
        mechanisms which work without payments. We consider the problem of throughput
        maximization in multi-channel environments and shed light onto the throughput
        increase that can be achieved with and without payments. We introduce and analyze
        two different concepts: the worst-case leverage where we assume that players end
        up in the worst rational strategy profile, and the average-case leverage where
        players select a random non-dominated strategy. Our theoretical insights are
        complemented by simulations}, 
  www_section = {distributed systems, game-theoretic, individual performance, mechanism
        design, payment, throughput maximization}, 
  isbn = {978-1-4244-3512-8}, 
  doi = {10.1109/INFCOM.2009.5062008}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOMM\%2709\%20-\%20Mechanism\%20design\%20without\%20payments.pdf},
}
mrkoot:sirer04
@conference{mrkoot:sirer04,
  title = {Eluding carnivores: file sharing with strong anonymity}, 
  author = {Emin G{\"u}n Sirer and Goel, Sharad and Mark Robson and Engin, Dogan}, 
  booktitle = {Proceedings of the 11th Workshop on ACM SIGOPS European Workshop: Beyond the
        PC (EW11)}, 
  organization = {ACM Press}, 
  year = {2004}, 
  address = {New York, NY, USA}, 
  publisher = {ACM Press}, 
  www_section = {anonymity, file-sharing, overlay networks}, 
  doi = {10.1145/1133572.1133611}, 
  url = {http://dx.doi.org/10.1145/1133572.1133611}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/herbivore-esigops.pdf}, 
}
mteich-2017
@mastersthesis{mteich-2017,
  title = {Implementing Privacy Preserving Auction Protocols}, 
  author = {Markus Teich}, 
  school = {TUM}, 
  type = {Master of Science}, 
  year = {2017}, 
  month = {February}, 
  address = {Munich}, 
  pages = {0--100}, 
  editor = {Totakura, Sree Harsha and Grothoff, Christian and Felix Brandt}, 
  abstract = {In this thesis we translate Brandt's privacy preserving sealed-bid online
        auction protocol from RSA to elliptic curve arithmetic and analyze the
        theoretical and practical benefits. With Brandt's protocol, the auction outcome
        is completely resolved by the bidders and the seller without the need for a
        trusted third party. Loosing bids are not revealed to anyone. We present
        libbrandt, our implementation of four algorithms with different outcome and
        pricing properties, and describe how they can be incorporated in a real-world
        online auction system. Our performance measurements show a reduction of
        computation time and prospective bandwidth cost of over 90\% compared to an
        implementation of the RSA version of the same algorithms. We also evaluate how
        libbrandt scales in different dimensions and conclude that the system we have
        presented is promising with respect to an adoption in the real world}, 
  www_section = {auctions, GNUnet, secure multi-party computation}, 
  www_tags = {selected}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thesis_0.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
murdoch-pet2007
@conference{murdoch-pet2007,
  title = {Sampled Traffic Analysis by Internet-Exchange-Level Adversaries}, 
  author = {Steven J. Murdoch and Piotr Zieli{\'n}ski}, 
  booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET
        2007)}, 
  organization = {Springer}, 
  year = {2007}, 
  month = {June}, 
  address = {Ottawa, Canada}, 
  editor = {Borisov, Nikita and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {Existing low-latency anonymity networks are vulnerable to traffic analysis,
        so location diversity of nodes is essential to defend against attacks. Previous
        work has shown that simply ensuring geographical diversity of nodes does not
        resist, and in some cases exacerbates, the risk of traffic analysis by ISPs.
        Ensuring high autonomous-system (AS) diversity can resist this weakness. However,
        ISPs commonly connect to many other ISPs in a single location, known as an
        Internet eXchange (IX). This paper shows that IXes are a single point where
        traffic analysis can be performed. We examine to what extent this is true,
        through a case study of Tor nodes in the UK. Also, some IXes sample packets
        flowing through them for performance analysis reasons, and this data could be
        exploited to de-anonymize traffic. We then develop and evaluate Bayesian traffic
        analysis techniques capable of processing this sampled data}, 
  www_section = {anonymity, Internet exchange, traffic analysis}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/murdoch-pet2007.pdf}, 
}
murdoch-pet2008
@conference{murdoch-pet2008,
  title = {Metrics for Security and Performance in Low-Latency Anonymity Networks}, 
  author = {Steven J. Murdoch and Robert N. M. Watson}, 
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)}, 
  organization = {Springer}, 
  year = {2008}, 
  month = {July}, 
  address = {Leuven, Belgium}, 
  pages = {115--132}, 
  editor = {Borisov, Nikita and Ian Goldberg}, 
  publisher = {Springer}, 
  www_section = {anonymity, Tor}, 
  isbn = {978-3-540-70629-8}, 
  doi = {10.1007/978-3-540-70630-4_8}, 
  url = {http://portal.acm.org/citation.cfm?id=1428259.1428267}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/murdoch-pet2008.pdf}, 
}
mwachs2014
@phdthesis{mwachs2014,
  title = {A Secure and Resilient Communication Infrastructure for Decentralized Networking
        Applications}, 
  author = {Matthias Wachs}, 
  school = {Technische Universit{\"a}t M{\"u}nchen}, 
  volume = {PhD}, 
  year = {2015}, 
  month = {February}, 
  address = {M{\"u}nchen}, 
  pages = {0--250}, 
  type = {PhD}, 
  abstract = {This thesis provides the design and implementation of a secure and resilient
        communication infrastructure for decentralized peer-to-peer networks. The
        proposed communication infrastructure tries to overcome limitations to
        unrestricted communication on today's Internet and has the goal of
        re-establishing unhindered communication between users. With the GNU name system,
        we present a fully decentralized, resilient, and privacy-preserving alternative
        to DNS and existing security infrastructures}, 
  www_section = {Communication, GNU Name System, GNUnet, P2P, resilience}, 
  www_tags = {selected}, 
  isbn = {3-937201-45-9}, 
  doi = {10.2313/NET-2015-02-1}, 
  url = {http://nbn-resolving.de/urn/resolver.pl?urn:bvb:91-diss-20150225-1231854-0-7}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/NET-2015-02-1.pdf}, 
}
newman:pet2003
@conference{newman:pet2003,
  title = {Metrics for Traffic Analysis Prevention}, 
  author = {Richard E. Newman and Ira S. Moskowitz and Paul Syverson and Andrei Serjantov}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = {March}, 
  pages = {48--65}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {This paper considers systems for Traffic Analysis Prevention (TAP) in a
        theoretical model. It considers TAP based on padding and rerouting of messages
        and describes the effects each has on the difference between the actual and the
        observed traffic matrix (TM). The paper introduces an entropy-based approach to
        the amount of uncertainty a global passive adversary has in determining the
        actual TM, or alternatively, the probability that the actual TM has a property of
        interest. Unlike previous work, the focus is on determining the overall amount of
        anonymity a TAP system can provide, or the amount it can provide for a given cost
        in padding and rerouting, rather than on the amount of protection afforded
        particular communications}, 
  www_section = {traffic analysis, traffic matrix}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steinbrecher-pet2003_0.pdf},
}
newman:pet2004
@conference{newman:pet2004,
  title = {Anonymity and Covert Channels in Simple Timed Mix-firewalls}, 
  author = {Richard E. Newman and Vipan R. Nalla and Ira S. Moskowitz}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  organization = {Springer Berlin / Heidelberg}, 
  volume = {3424}, 
  year = {2004}, 
  month = {May}, 
  pages = {1--16}, 
  publisher = {Springer Berlin / Heidelberg}, 
  series = {LNCS}, 
  abstract = {Traditional methods for evaluating the amount of anonymity afforded by
        various Mix configurations have depended on either measuring the size of the set
        of possible senders of a particular message (the anonymity set size), or by
        measuring the entropy associated with the probability distribution of the
        message's possible senders. This paper explores further an alternative way of
        assessing the anonymity of a Mix system by considering the capacity of a covert
        channel from a sender behind the Mix to an observer of the Mix's output. Initial
        work considered a simple model, with an observer (Eve) restricted to counting the
        number of messages leaving a Mix configured as a firewall guarding an enclave
        with one malicious sender (Alice) and some other naive senders (Cluelessi's).
        Here, we consider the case where Eve can distinguish between multiple
        destinations, and the senders can select to which destination their message (if
        any) is sent each clock tick}, 
  isbn = {978-3-540-26203-9}, 
  doi = {10.1007/b136164}, 
  url = {http://www.springerlink.com/content/w256n3dfl6wf2q3m/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/newman-pet2004.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
nguyen2006vsf
@article{nguyen2006vsf,
  title = {Verifiable shuffles: a formal model and a Paillier-based three-round
        construction with provable security}, 
  author = {Lan Nguyen and Rei Safavi-Naini and Kaoru Kurosawa}, 
  journal = {International Journal of Information Security}, 
  volume = {5}, 
  number = {4}, 
  year = {2006}, 
  pages = {241--255}, 
  publisher = {Springer}, 
  abstract = {A shuffle takes a list of ciphertexts and outputs a permuted list of
        re-encryptions of the input ciphertexts. Mix-nets, a popular method for anonymous
        routing, can be constructed from a sequence of shuffles and decryption. We
        propose a formal model for security of verifiable shuffles and a new verifiable
        shuffle system based on the Paillier encryption scheme, and prove its security in
        the proposed model. The model is general and can be extended to provide provable
        security for verifiable shuffle decryption}, 
  www_section = {formal security model, paillier public-key system, privacy, verifiable
        shuffles}, 
  issn = {1615-5262}, 
  doi = {10.1007/s10207-006-0004-8}, 
  url = {http://portal.acm.org/citation.cfm?id=1164438}, 
}
nguyen:pet2003
@conference{nguyen:pet2003,
  title = {Breaking and Mending Resilient Mix-nets}, 
  author = {Lan Nguyen and Rei Safavi-Naini}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = {March}, 
  pages = {66--80}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {In this paper we show two attacks against universally resilient mix-nets. The
        first attack can be used against a number of mix-nets, including Furukawa-Sako01
        [6], Millimix [11], Abe98 [1], MiP-1, MiP-2 [2,3] and Neff01 [19]. We give the
        details of the attack in the case of Furukawa-Sako01 mix-net. The second attack
        breaks the correctness of Millimix [11]. We show how to counter these attacks,
        and give efficiency and security analysis for the proposed countermeasures}, 
  www_section = {attack, security analysis}, 
  isbn = {978-3-540-20610-1}, 
  doi = {10.1007/b94512}, 
  url = {http://www.springerlink.com/content/0e0mwvgyt008wxkf/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nguyen-pet2003.pdf}, 
}
nussbaum2008p2plab
@article{nussbaum2008p2plab,
  title = {Lightweight emulation to study peer-to-peer systems}, 
  author = {Nussbaum, Lucas and Richard, Olivier}, 
  journal = {Concurrency and Computation: Practice and Experience}, 
  volume = {20}, 
  number = {6}, 
  year = {2008}, 
  pages = {735--749}, 
  publisher = {John Wiley \& Sons, Ltd}, 
  www_section = {BitTorrent, emulation, evaluation, network, peer-to-peer, virtualization}, 
  issn = {1532-0634}, 
  doi = {10.1002/cpe.1242}, 
  url = {http://dx.doi.org/10.1002/cpe.1242}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p2plab-cpe.pdf}, 
}
nym-alias-net
@conference{nym-alias-net,
  title = {The Design, Implementation and Operation of an Email Pseudonym Server}, 
  author = {David Mazi{\`e}res and Frans M. Kaashoek}, 
  booktitle = {Proceedings of the 5th ACM Conference on Computer and Communications
        Security (CCS 1998)}, 
  organization = {ACM Press}, 
  year = {1998}, 
  month = {November}, 
  publisher = {ACM Press}, 
  abstract = {Attacks on servers that provide anonymity generally fall into two categories:
        attempts to expose anonymous users and attempts to silence them. Much existing
        work concentrates on withstanding the former, but the threat of the latter is
        equally real. One particularly effective attack against anonymous servers is to
        abuse them and stir up enough trouble that they must shut down. This paper
        describes the design, implementation, and operation of nym.alias.net, a server
        providing untraceable email aliases. We enumerate many kinds of abuse the system
        has weathered during two years of operation, and explain the measures we enacted
        in response. From our experiences, we distill several principles by which one can
        protect anonymous servers from similar attacks}, 
  isbn = {1-58113-007-4}, 
  doi = {10.1145/288090.288098}, 
  url = {http://portal.acm.org/citation.cfm?id=288098}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nym-alias-net.pdf}, 
}
%%%%% ERROR: Missing field
% www_section = {?????},
nymble-tdsc
@article{nymble-tdsc,
  title = {Nymble: Blocking Misbehaving Users in Anonymizing Networks}, 
  author = {Patrick P. Tsang and Apu Kapadia and Cory Cornelius and Sean Smith}, 
  journal = {IEEE Transactions on Dependable and Secure Computing (TDSC)}, 
  year = {2009}, 
  month = {September}, 
  abstract = {Anonymizing networks such as Tor allow users to access Internet services
        privately by using a series of routers to hide the client's IP address from the
        server. The success of such networks, however, has been limited by users
        employing this anonymity for abusive purposes such as defacing popular websites.
        Website administrators routinely rely on IP-address blocking for disabling access
        to misbehaving users, but blocking IP addresses is not practical if the abuser
        routes through an anonymizing network. As a result, administrators block {\em
        all} known exit nodes of anonymizing networks, denying anonymous access to honest
        and dishonest users alike. To address this problem, we present Nymble, a system
        in which \emph{servers can blacklist misbehaving users without compromising their
        anonymity}. Our system is thus agnostic to different servers' definitions of
        misbehavior {\textemdash} servers can block users for whatever reason, and the
        privacy of blacklisted users is maintained}, 
  www_section = {authentication, privacy}, 
  issn = {1545-5971}, 
  doi = {10.1109/TDSC.2009.38}, 
  url = {http://www.computer.org/portal/web/csdl/doi/10.1109/TDSC.2009.38}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/nymble-tdsc.pdf}, 
}
oakland11-formalizing
@conference{oakland11-formalizing,
  title = {Formalizing Anonymous Blacklisting Systems}, 
  author = {Ryan Henry and Ian Goldberg}, 
  booktitle = {Proceedings of the 2011 IEEE Symposium on Security and Privacy}, 
  year = {2011}, 
  month = {May}, 
  address = {San Francisco, CA, USA}, 
  abstract = {Anonymous communications networks, such as Tor, help to solve the real and
        important problem of enabling users to communicate privately over the Internet.
        However, in doing so, anonymous communications networks introduce an entirely new
        problem for the service providers{\textemdash}such as websites, IRC networks or
        mail servers{\textemdash}with which these users interact; in particular, since
        all anonymous users look alike, there is no way for the service providers to hold
        individual misbehaving anonymous users accountable for their actions. Recent
        research efforts have focused on using anonymous blacklisting systems (which are
        sometimes called anonymous revocation systems) to empower service providers with
        the ability to revoke access from abusive anonymous users. In contrast to
        revocable anonymity systems, which enable some trusted third party to deanonymize
        users, anonymous blacklisting systems provide users with a way to authenticate
        anonymously with a service provider, while enabling the service provider to
        revoke access from any users that misbehave, without revealing their identities.
        In this paper, we introduce the anonymous blacklisting problem and survey the
        literature on anonymous blacklisting systems, comparing and contrasting the
        architecture of various existing schemes, and discussing the tradeoffs inherent
        with each design. The literature on anonymous blacklisting systems lacks a
        unified set of definitions; each scheme operates under different trust
        assumptions and provides different security and privacy guarantees. Therefore,
        before we discuss the existing approaches in detail, we first propose a formal
        definition for anonymous blacklisting systems, and a set of security and privacy
        properties that these systems should possess. We also outline a set of new
        performance requirements that anonymous blacklisting systems should satisfy to
        maximize their potential for real-world adoption, and give formal definitions for
        several optional features already supported by some schemes in the literature}, 
  www_section = {anonymity, anonymous blacklisting, authentication, privacy enhancing
        technologies, privacy-enhanced revocation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Formalizing\%20Anonymous\%20Blacklisting\%20Systems.pdf},
  url = {https://bibliography.gnunet.org}, 
}
oakland2012-lap
@conference{oakland2012-lap,
  title = {LAP: Lightweight Anonymity and Privacy}, 
  author = {Hsu-Chun Hsiao and Tiffany Hyun-Jin Kim and Adrian Perrig and Akira Yamada and
        Sam Nelson and Marco Gruteser and Wei Ming}, 
  booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society}, 
  year = {2012}, 
  month = {May}, 
  address = {San Francisco, CA, USA}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Popular anonymous communication systems often require sending packets through
        a sequence of relays on dilated paths for strong anonymity protection. As a
        result, increased end-to-end latency renders such systems inadequate for the
        majority of Internet users who seek an intermediate level of anonymity protection
        while using latency-sensitive applications, such as Web applications. This paper
        serves to bridge the gap between communication systems that provide strong
        anonymity protection but with intolerable latency and non-anonymous communication
        systems by considering a new design space for the setting. More specifically, we
        explore how to achieve near-optimal latency while achieving an intermediate level
        of anonymity with a weaker yet practical adversary model (i.e., protecting an
        end-host's identity and location from servers) such that users can choose between
        the level of anonymity and usability. We propose Lightweight Anonymity and
        Privacy (LAP), an efficient network-based solution featuring lightweight path
        establishment and stateless communication, by concealing an end-host's
        topological location to enhance anonymity against remote tracking. To show
        practicality, we demonstrate that LAP can work on top of the current Internet and
        proposed future Internet architectures}, 
  www_section = {anonymous communication anonymity protection, LAP}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LAP\%3A\%20Lightweight\%20Anonymity\%20and\%20Privacy.pdf},
  url = {https://bibliography.gnunet.org}, 
}
oakland2012-lastor
@conference{oakland2012-lastor,
  title = {LASTor: A Low-Latency AS-Aware Tor Client}, 
  author = {Masoud Akhoondi and Curtis Yu and Harsha V. Madhyastha}, 
  booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society}, 
  year = {2012}, 
  month = {May}, 
  address = {San Francisco, CA, USA}, 
  publisher = {IEEE Computer Society}, 
  abstract = {The widely used Tor anonymity network is designed to enable low-latency
        anonymous communication. However, in practice, interactive communication on
        Tor{\textemdash}which accounts for over 90\% of connections in the Tor network
        [1]{\textemdash}incurs latencies over 5x greater than on the direct Internet
        path. In addition, since path selection to establish a circuit in Tor is
        oblivious to Internet routing, anonymity guarantees can breakdown in cases where
        an autonomous system (AS) can correlate traffic across the entry and exit
        segments of a circuit. In this paper, we show that both of these shortcomings in
        Tor can be addressed with only client-side modifications, i.e., without requiring
        a revamp of the entire Tor architecture. To this end, we design and implement a
        new Tor client, LASTor. First, we show that LASTor can deliver significant
        latency gains over the default Tor client by simply accounting for the inferred
        locations of Tor relays while choosing paths. Second, since the preference for
        low latency paths reduces the entropy of path selection, we design LASTor's path
        selection algorithm to be tunable. A user can choose an appropriate tradeoff
        between latency and anonymity by specifying a value between 0 (lowest latency)
        and 1 (highest anonymity) for a single parameter. Lastly, we develop an efficient
        and accurate algorithm to identify paths on which an AS can correlate traffic
        between the entry and exit segments. This algorithm enables LASTor to avoid such
        paths and improve a user's anonymity, while the low runtime of the algorithm
        ensures that the impact on end-to-end latency of communication is low. By
        applying our techniques to measurements of real Internet paths and by using
        LASTor to visit the top 200 websites from several geographically-distributed
        end-hosts, we show that, in comparison to the default Tor client, LASTor reduces
        median latencies by 25\% while also reducing the false negative rate of not
        detecting a potential snooping AS from 57\% to 11\%}, 
  www_section = {anonymous communication, as, autonomous system, Tor}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LASTor\%3A\%20A\%20Low-Latency\%20AS-Aware\%20Tor\%20Client.pdf},
  url = {https://bibliography.gnunet.org}, 
}
oakland2012-peekaboo
@conference{oakland2012-peekaboo,
  title = {Peek-a-Boo, I Still See You: Why Efficient Traffic Analysis Countermeasures
        Fail}, 
  author = {Kevin P. Dyer and Scott Coull and Thomas Ristenpart and Thomas Shrimpton}, 
  booktitle = {Proceedings of the 2012 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society}, 
  year = {2012}, 
  month = {May}, 
  address = {San Francisco, CA, USA}, 
  publisher = {IEEE Computer Society}, 
  abstract = {We consider the setting of HTTP traffic over encrypted tunnels, as used to
        conceal the identity of websites visited by a user. It is well known that traffic
        analysis (TA) attacks can accurately identify the website a user visits despite
        the use of encryption, and previous work has looked at specific
        attack/countermeasure pairings. We provide the first comprehensive analysis of
        general-purpose TA countermeasures. We show that nine known countermeasures are
        vulnerable to simple attacks that exploit coarse features of traffic (e.g., total
        time and bandwidth). The considered countermeasures include ones like those
        standardized by TLS, SSH, and IPsec, and even more complex ones like the traffic
        morphing scheme of Wright et al. As just one of our results, we show that despite
        the use of traffic morphing, one can use only total upstream and downstream
        bandwidth to identify {\textemdash}with 98\% accuracy{\textemdash} which of two
        websites was visited. One implication of what we find is that, in the context of
        website identification, it is unlikely that bandwidth-efficient, general- purpose
        TA countermeasures can ever provide the type of security targeted in prior work}, 
  www_section = {encrypted traffic, machine learning, padding, privacy, traffic analysis
        countermeasures}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Peek-a-Boo\%2C\%20I\%20Still\%20See\%20You\%3A\%20Why\%20Efficient\%20Traffic\%20Analysis\%20Countermeasures\%20Fail.pdf},
  url = {https://bibliography.gnunet.org}, 
}
obfuscation_osn2014
@conference{obfuscation_osn2014,
  title = {On the Effectiveness of Obfuscation Techniques in Online Social Networks},
  author = {Chen, Terence and Boreli, Roksana and Kaafar, Mohamed-Ali and Friedman, Arik},
  booktitle = {Privacy Enhancing Technologies},
  organization = {Springer International Publishing},
  volume = {8555},
  year = {2014},
  pages = {42--62},
  editor = {De Cristofaro, Emiliano and Murdoch, Steven J.},
  publisher = {Springer International Publishing},
  series = {Lecture Notes in Computer Science},
  abstract = {Data obfuscation is a well-known technique for protecting user privacy
        against inference attacks, and it was studied in diverse settings, including
        search queries, recommender systems, location-based services and Online Social
        Networks (OSNs). However, these studies typically take the point of view of a
        single user who applies obfuscation, and focus on protection of a single target
        attribute. Unfortunately, while narrowing the scope simplifies the problem, it
        overlooks some significant challenges that effective obfuscation would need to
        address in a more realistic setting. First, correlations between attributes imply
        that obfuscation conducted to protect a certain attribute, may influence
        inference attacks targeted at other attributes. In addition, when multiple users
        conduct obfuscation simultaneously, the combined effect of their obfuscations may
        be significant enough to affect the inference mechanism to their detriment. In
        this work we focus on the OSN setting and use a dataset of 1.9 million Facebook
        profiles to demonstrate the severity of these problems and explore possible
        solutions. For example, we show that an obfuscation policy that would limit the
        accuracy of inference to 45\% when applied by a single user, would result in an
        inference accuracy of 75\% when applied by 10\% of the users. We show that a
        dynamic policy, which is continuously adjusted to the most recent data in the
        OSN, may mitigate this problem. Finally, we report the results of a user study,
        which indicates that users are more willing to obfuscate their profiles using
        popular and high quality items. Accordingly, we propose and evaluate an
        obfuscation strategy that satisfies both user needs and privacy protection},
  isbn = {978-3-319-08505-0},
  doi = {10.1007/978-3-319-08506-7_3},
  url = {http://dx.doi.org/10.1007/978-3-319-08506-7_3},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/obfuscation_osn.pdf},
  www_section = {Unsorted},
}
oehlmann2014machinelearning
@mastersthesis{oehlmann2014machinelearning,
  title = {Machine Learning for Bandwidth Management in Decentralized Networks},
  author = {Fabian Oehlmann},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  year = {2014},
  month = {February},
  address = {Garching bei M{\"u}nchen},
  pages = {0--91},
  type = {Master's Thesis},
  abstract = {The successful operation of a peer-to-peer network depends on the resilience
        of its peer's communications. On the Internet, direct connections between peers
        are often limited by restrictions like NATs and traffic filtering. Addressing
        such problems is particularly pressing for peer-to-peer networks that do not wish
        to rely on any trusted infrastructure, which might otherwise help the
        participants establish communication channels. Modern peer-to-peer networks
        employ various techniques to address the problem of restricted connectivity on
        the Internet. One interesting development is that various overlay networks now
        support multiple communication protocols to improve resilience and counteract
        service degradation. The support of multiple protocols causes a number of new
        challenges. A peer should evaluate which protocols fulfill the communication
        requirements best. Furthermore, limited resources, such as bandwidth, should be
        distributed among peers and protocols to match application requirements. Existing
        approaches to this problem of transport selection and resource allocation are
        rigid: they calculate the solution only from the current state of the
        environment, and do not adapt their strategy based on failures or successes of
        previous allocations. This thesis explores the feasibility of using machine
        learning to improve the quality of the transport selection and resource
        allocation over current approaches. The goal is to improve the solution process
        by learning selection and allocation strategies from the experience gathered in
        the course of many iterations of the algorithm. We compare the different
        approaches in the field of machine learning with respect to their properties and
        suitability for the problem. Based on this evaluation and an in-depth analysis of
        the requirements of the underlying problem, the thesis presents a design how
        reinforcement learning can be used and adapted to the given problem domain. The
        design is evaluated with the help of simulation and a realistic implementation in
        the GNUnet Peer-to-Peer framework. Our experimental results highlight some of the
        implications of the multitude of implementation choices, key challenges, and
        possible directions for the use of reinforcement learning in this domain},
  www_section = {Unsorted},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/oehlmann2014machinelearning.pdf},
  url = {https://bibliography.gnunet.org},
}
onion-discex00
@conference{onion-discex00,
  author = {Paul Syverson and Michael Reed and David Goldschlag},
  title = {Onion Routing Access Configurations},
  booktitle = {Proceedings of the DARPA Information Survivability Conference and Exposition
        (DISCEX 2000)},
  volume = {1},
  pages = {34--40},
  year = {2000},
  organization = {IEEE CS Press},
  publisher = {IEEE CS Press},
  abstract = {Onion Routing is an infrastructure for private communication over a public
        network. It provides anonymous connections that are strongly resistant to both
        eavesdropping and traffic analysis. Thus it hides not only the data being sent,
        but who is talking to whom. Onion Routing's anonymous connections are
        bidirectional and near real-time, and can be used anywhere a socket connection
        can be used. Proxy aware applications, such as web browsing and e-mail, require
        no modification to use Onion Routing, and do so through a series of proxies.
        Other applications, such as remote login, can also use the system without
        modification. Access to an onion routing network can be configured in a variety
        of ways depending on the needs, policies, and facilities of those connecting.
        This paper describes some of these access configurations and also provides a
        basic overview of Onion Routing and comparisons with related work},
  www_section = {anonymity, privacy, traffic analysis},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/onion-discex00.pdf},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.4633},
}
onion-routing:ih96
@conference{onion-routing:ih96,
  title = {Hiding Routing Information},
  author = {David Goldschlag and Michael Reed and Paul Syverson},
  booktitle = {Proceedings of Information Hiding: First International Workshop},
  organization = {Springer-Verlag, LNCS 1174},
  year = {1996},
  month = {May},
  pages = {137--150},
  editor = {Ross Anderson},
  publisher = {Springer-Verlag, LNCS 1174},
  abstract = {This paper describes an architecture, Onion Routing, that limits a
        network's vulnerability to traffic analysis. The architecture provides anonymous
        socket connections by means of proxy servers. It provides real-time,
        bi-directional, anonymous communication for any protocol that can be adapted to
        use a proxy service. Specifically, the architecture provides for bi-directional
        communication even though no-one but the initiator's proxy server knows anything
        but previous and next hops in the communication chain. This implies that neither
        the respondent nor his proxy server nor any external observer need know the
        identity of the initiator or his proxy server. A prototype of Onion Routing has
        been implemented. This prototype works with HTTP (World Wide Web) proxies. In
        addition, an analogous proxy for TELNET has been implemented. Proxies for FTP and
        SMTP are under development},
  www_section = {communication chain, onion routing, traffic analysis},
  isbn = {3-540-61996-8},
  url = {http://portal.acm.org/citation.cfm?id=731526},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IH-1996.pdf},
}
onion-routing:pet2000
@conference{onion-routing:pet2000,
  author = {Paul Syverson and Gene Tsudik and Michael Reed and Carl Landwehr},
  title = {Towards an Analysis of Onion Routing Security},
  booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability},
  pages = {96--114},
  year = {2000},
  month = {July},
  organization = {Springer-Verlag, LNCS 2009},
  publisher = {Springer-Verlag, LNCS 2009},
  abstract = {This paper presents a security analysis of Onion Routing, an application
        independent infrastructure for traffic-analysis-resistant and anonymous Internet
        connections. It also includes an overview of the current system design,
        definitions of security goals and new adversary models},
  www_section = {anonymity, privacy, traffic analysis},
  isbn = {978-3-540-41724-8},
  doi = {10.1007/3-540-44702-4},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.34.5547.pdf},
  url = {http://portal.acm.org/citation.cfm?id=371981},
}
overlier-pet2007
@conference{overlier-pet2007,
  title = {Improving Efficiency and Simplicity of Tor circuit establishment and hidden
        services},
  author = {Lasse {\O}verlier and Paul Syverson},
  booktitle = {Proceedings of the Seventh Workshop on Privacy Enhancing Technologies (PET
        2007)},
  organization = {Springer},
  year = {2007},
  month = {June},
  address = {Ottawa, Canada},
  editor = {Borisov, Nikita and Golle, Philippe},
  publisher = {Springer},
  abstract = {In this paper we demonstrate how to reduce the overhead and delay of circuit
        establishment in the Tor anonymizing network by using predistributed
        Diffie-Hellman values. We eliminate the use of RSA encryption and decryption from
        circuit setup, and we reduce the number of DH exponentiations vs. the current Tor
        circuit setup protocol while maintaining immediate forward secrecy. We also
        describe savings that can be obtained by precomputing during idle cycles values
        that can be determined before the protocol starts. We introduce the distinction
        of eventual vs. immediate forward secrecy and present protocols that illustrate
        the distinction. These protocols are even more efficient in communication and
        computation than the one we primarily propose, but they provide only eventual
        forward secrecy. We describe how to reduce the overhead and the complexity of
        hidden server connections by using our DH-values to implement valet nodes and
        eliminate the need for rendezvous points as they exist today. We also discuss the
        security of the new elements and an analysis of efficiency improvements},
  www_section = {public key cryptography},
  doi = {10.1007/978-3-540-75551-7},
  url = {http://www.springerlink.com/content/j68v312681l8v874/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/overlier-pet2007.pdf},
}
p2p09-peersim
@conference{p2p09-peersim,
  title = {PeerSim: A Scalable P2P Simulator},
  author = {Alberto Montresor and M{\'a}rk Jelasity and Gian Paolo Jesi and Spyros
        Voulgaris},
  booktitle = {P2P'09--Proceedings of the 9th International Conference on Peer-to-Peer},
  year = {2009},
  month = {September},
  address = {Seattle, WA},
  pages = {99--100},
  abstract = {The key features of peer-to-peer (P2P) systems are scalability and dynamism.
        The evaluation of a P2P protocol in realistic environments is very expensive and
        difficult to reproduce, so simulation is crucial in P2P research. PeerSim is an
        extremely scalable simulation environment that supports dynamic scenarios such as
        churn and other failure models. Protocols need to be specifically implemented for
        the PeerSim Java API, but with a reasonable effort they can be evolved into a
        real implementation. Testing in specified parameter-spaces is supported as well.
        PeerSim started out as a tool for our own research},
  www_section = {P2P, peer-to-peer networking, PeerSim, simulation},
  isbn = {978-1-4244-5066-4},
  doi = {10.1109/P2P.2009.5284506},
  url = {http://peersim.sourceforge.net/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/P2P\%2709\%20-\%20PeerSim.pdf},
}
p4t2016
@conference{p4t2016,
  author = {{\'A}lvaro Garc{\'\i}a-Recuero and Jeffrey Burdges and Christian Grothoff},
  title = {Privacy-Preserving Abuse Detection in Future Decentralised Online Social
        Networks},
  booktitle = {Data Privacy Management (DPM)},
  address = {Heraklion, Greece},
  year = {2016},
  month = {September},
  organization = {Springer},
  publisher = {Springer},
  abstract = {Future online social networks need to not only protect sensitive data of
        their users, but also protect them from abusive behavior coming from malicious
        participants in the network. We investigate the use of supervised learning
        techniques to detect abusive behavior and describe privacy-preserving protocols
        to compute the feature set required by abuse classification algorithms in a
        secure and privacy-preserving way. While our method is not yet fully resilient
        against a strong adaptive adversary, our evaluation suggests that it will be
        useful to detect abusive behavior with a minimal impact on privacy},
  www_section = {abuse, GNUnet, Privacy preserving, reputation, Social networking},
  www_tags = {selected},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/p4t.pdf},
  url = {https://bibliography.gnunet.org},
}
padlipky78
@techreport{padlipky78,
  title = {Limitations of End-to-End Encryption in Secure Computer Networks},
  author = {Michael A. Padlipsky and David W. Snow and Paul A. Karger},
  institution = {The MITRE Corporation: Bedford MA, HQ Electronic Systems Division},
  number = {ESD-TR-78-158},
  year = {1978},
  month = {August},
  address = {Hanscom AFB, MA},
  www_section = {traffic analysis},
  url = {http://stinet.dtic.mil/cgi-bin/GetTRDoc?AD=ADA059221\&Location=U2\&doc=GetTRDoc.pdf},
}
panic2014
@mastersthesis{panic2014,
  title = {An Approach for Home Routers to Securely Erase Sensitive Data},
  author = {Nicolas Bene{\v s}},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  year = {2014},
  month = {October},
  address = {Munich},
  pages = {0--64},
  type = {Bachelor Thesis},
  abstract = {Home routers are always-on low power embedded systems and part of the
        Internet infrastructure. In addition to the basic router functionality, they can
        be used to operate sensitive personal services, such as for private web and email
        servers, secure peer-to-peer networking services like GNUnet and Tor, and
        encrypted network file system services. These services naturally involve
        cryptographic operations with the cleartext keys being stored in RAM. This makes
        router devices possible targets to physical attacks by home intruders. Attacks
        include interception of unprotected data on bus wires, alteration of firmware
        through exposed JTAG headers, or recovery of cryptographic keys through the cold
        boot attack. This thesis presents Panic!, a combination of open hardware design
        and free software to detect physical integrity attacks and to react by securely
        erasing cryptographic keys and other sensitive data from memory. To improve
        auditability and to allow cheap reproduction, the components of Panic! are kept
        simple in terms of conceptual design and lines of code. First, the motivation to
        use home routers for services besides routing and the need to protect their
        physical integrity is discussed. Second, the idea and functionality of the Panic!
        system is introduced and the high-level interactions between its components
        explained. Third, the software components to be run on the router are described.
        Fourth, the requirements of the measurement circuit are declared and a prototype
        is presented. Fifth, some characteristics of pressurized environments are
        discussed and the difficulties for finding adequate containments are explained.
        Finally, an outlook to tasks left for the future is given},
  www_section = {GNUnet, home router, intrusion detection, memory erasure, Panic, physical
        access},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/panic.pdf},
  www_tags = {selected},
  url = {https://bibliography.gnunet.org},
}
paper_short2014
@conference{paper_short2014,
  title = {Automatic Transport Selection and Resource Allocation for Resilient
        Communication in Decentralised Networks},
  author = {Matthias Wachs and Fabian Oehlmann and Christian Grothoff},
  booktitle = {14-th IEEE International Conference on Peer-to-Peer Computing},
  year = {2014},
  month = {October},
  address = {London, England},
  abstract = {Making communication more resilient is a main focus for modern decentralised
        networks. A current development to increase connectivity between participants and
        to be resilient against service degradation attempts is to support different
        communication protocols, and to switch between these protocols in case
        degradation or censorship are detected. Supporting multiple protocols with
        different properties and having to share resources for communication with
        multiple partners creates new challenges with respect to protocol selection and
        resource allocation to optimally satisfy the applications' requirements for
        communication. This paper presents a novel approach for automatic transport
        selection and resource allocation with a focus on decentralised networks. Our
        goal is to evaluate the communication mechanisms available for each communication
        partner and then allocate resources in line with the requirements of the
        applications. We begin by detailing the overall requirements for an algorithm for
        transport selection and resource allocation, and then compare three different
        solutions using (1) a heuristic, (2) linear optimisation, and (3) machine
        learning. To show the suitability and the specific benefits of each approach, we
        evaluate their performance with respect to usability, scalability and quality of
        the solution found in relation to application requirements},
  www_section = {GNUnet, resource allocation},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/paper_short.pdf},
  www_tags = {selected},
  url = {https://bibliography.gnunet.org},
}
patterns-failure
@conference{patterns-failure,
  author = {Richard Clayton and George Danezis and Markus G. Kuhn},
  title = {Real World Patterns of Failure in Anonymity Systems},
  booktitle = {Proceedings of Information Hiding Workshop (IH 2001)},
  editor = {Ira S. Moskowitz},
  pages = {230--244},
  year = {2001},
  month = {April},
  organization = {Springer-Verlag, LNCS 2137},
  publisher = {Springer-Verlag, LNCS 2137},
  abstract = {We present attacks on the anonymity and pseudonymity provided by a "lonely
        hearts" dating service and by the HushMail encrypted email system. We move on to
        discuss some generic attacks upon anonymous systems based on the engineering
        reality of these systems rather than the theoretical foundations on which they
        are based. However, for less sophisticated users it is social engineering
        attacks, owing nothing to computer science, that pose the biggest day-to-day
        danger. This practical experience then permits a start to be made on developing a
        security policy model for pseudonymous communications},
  www_section = {pseudonym, security policy},
  isbn = {3-540-42733-3},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Patterns_of_Failure.pdf},
  url = {http://portal.acm.org/citation.cfm?id=731864},
}
perea-tissec11
@article{perea-tissec11,
  title = {PEREA: Practical TTP-free revocation of repeatedly misbehaving anonymous users},
  author = {Man Ho Au and Patrick P. Tsang and Apu Kapadia},
  journal = {ACM Transactions on Information and System Security ({ACM TISSEC})},
  volume = {14},
  year = {2011},
  month = {December},
  address = {New York, NY, USA},
  pages = {29:1--29:34},
  publisher = {ACM},
  abstract = {Several anonymous authentication schemes allow servers to revoke a
        misbehaving user's future accesses. Traditionally, these schemes have relied on
        powerful Trusted Third Parties (TTPs) capable of deanonymizing (or linking)
        users' connections. Such TTPs are undesirable because users' anonymity is not
        guaranteed, and users must trust them to judge {\textquoteleft}misbehavior'
        fairly. Recent schemes such as Blacklistable Anonymous Credentials (BLAC) and
        Enhanced Privacy ID (EPID) support {\textquotedblleft}privacy-enhanced
        revocation{\textquotedblright} {\textemdash} servers can revoke misbehaving users
        without a TTP's involvement, and without learning the revoked users' identities.
        In BLAC and EPID, however, the computation required for authentication at the
        server is linear in the size (L) of the revocation list, which is impractical as
        the size approaches thousands of entries. We propose PEREA, a new anonymous
        authentication scheme for which this bottleneck of computation is independent of
        the size of the revocation list. Instead, the time complexity of authentication
        is linear in the size of a revocation window K $\ll$ L, the number of subsequent
        authentications before which a user's misbehavior must be recognized if the user
        is to be revoked. We extend PEREA to support more complex revocation policies
        that take the severity of misbehaviors into account. Users can authenticate
        anonymously if their naughtiness, i.e., the sum of the severities of their
        blacklisted misbehaviors, is below a certain naughtiness threshold. We call our
        extension PEREA-Naughtiness. We prove the security of our constructions, and
        validate their efficiency as compared to BLAC both analytically and
        quantitatively},
  www_section = {anonymous authentication, anonymous blacklisting, privacy,
        privacy-enhanced revocation, user misbehavior},
  issn = {1094-9224},
  doi = {10.1145/2043628.2043630},
  url = {http://doi.acm.org/10.1145/2043628.2043630},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/TISSEC\%20-\%20PEREA.pdf},
}
pet05-bissias
@conference{pet05-bissias,
  author = {George Dean Bissias and Marc Liberatore and Brian Neil Levine},
  title = {Privacy Vulnerabilities in Encrypted HTTP Streams},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
  pages = {1--11},
  year = {2005},
  month = {May},
  organization = {Springer Berlin / Heidelberg},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {Encrypting traffic does not prevent an attacker from performing some types of
        traffic analysis. We present a straightforward traffic analysis attack against
        encrypted HTTP streams that is surprisingly effective in identifying the source
        of the traffic. An attacker starts by creating a profile of the statistical
        characteristics of web requests from interesting sites, including distributions
        of packet sizes and inter-arrival times. Later, candidate encrypted streams are
        compared against these profiles. In our evaluations using real traffic, we find
        that many web sites are subject to this attack. With a training period of 24
        hours and a 1 hour delay afterwards, the attack achieves only 23\% accuracy.
        However, an attacker can easily pre-determine which of trained sites are easily
        identifiable. Accordingly, against 25 such sites, the attack achieves 40\%
        accuracy; with three guesses, the attack achieves 100\% accuracy for our data.
        Longer delays after training decrease accuracy, but not substantially. We also
        propose some countermeasures and improvements to our current method. Previous
        work analyzed SSL traffic to a proxy, taking advantage of a known flaw in SSL
        that reveals the length of each web object. In contrast, we exploit the
        statistical characteristics of web streams that are encrypted as a single flow,
        which is the case with WEP/WPA, IPsec, and SSH tunnels},
  www_section = {privacy, traffic analysis},
  isbn = {978-3-540-34745-3},
  doi = {10.1007/11767831},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-bissias.pdf},
  url = {http://www.springerlink.com/content/1062w684754754h4/},
}
pet05-borisov
@conference{pet05-borisov,
  author = {Borisov, Nikita},
  title = {An Analysis of Parallel Mixing with Attacker-Controlled Inputs},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
  pages = {12--25},
  year = {2005},
  month = {May},
  organization = {Springer Berlin / Heidelberg},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {Parallel mixing [7] is a technique for optimizing the latency of a
        synchronous re-encryption mix network. We analyze the anonymity of this technique
        when an adversary can learn the output positions of some of the inputs to the mix
        network. Using probabilistic modeling, we show that parallel mixing falls short
        of achieving optimal anonymity in this case. In particular, when the number of
        unknown inputs is small, there are significant anonymity losses in the expected
        case. This remains true even if all the mixes in the network are honest, and
        becomes worse as the number of mixes increases. We also consider repeatedly
        applying parallel mixing to the same set of inputs. We show that an attacker who
        knows some input--output relationships will learn new information with each
        mixing and can eventually link previously unknown inputs and outputs},
  www_section = {anonymity, mix},
  isbn = {978-3-540-34745-3},
  doi = {10.1007/11767831},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-borisov.pdf},
  url = {http://www.springerlink.com/content/b0t0714165846m42/},
}
pet05-camenisch
@conference{pet05-camenisch,
  author = {Jan Camenisch and Anton Mityagin},
  title = {Mix-network with Stronger Security},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
  pages = {128--147},
  year = {2005},
  month = {May},
  organization = {Springer Berlin / Heidelberg},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {We consider a mix-network as a cryptographic primitive that provides
        anonymity. A mix-network takes as input a number of ciphertexts and outputs a
        random shuffle of the corresponding plaintexts. Common applications of mix-nets
        are electronic voting and anonymous network traffic. In this paper, we present a
        novel construction of a mix-network, which is based on shuffling ElGamal
        encryptions. Our scheme is the first mix-net to meet the strongest security
        requirements: it is robust and secure against chosen ciphertext attacks as well
        as against active attacks in the Universally Composable model. Our construction
        allows one to securely execute several mix-net instances concurrently, as well as
        to run multiple mix-sessions without changing a set of keys. Nevertheless, the
        scheme is efficient: it requires a linear work (in the number of input messages)
        per mix-server},
  www_section = {anonymity, electronic voting},
  isbn = {978-3-540-34745-3},
  doi = {10.1007/11767831},
  url = {http://www.springerlink.com/content/v32m5122127m78v0/},
}
pet05-serjantov
@conference{pet05-serjantov,
  author = {Andrei Serjantov and Steven J. Murdoch},
  title = {Message Splitting Against the Partial Adversary},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
  pages = {26--39},
  year = {2005},
  month = {May},
  organization = {Springer Berlin / Heidelberg},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {We review threat models used in the evaluation of anonymity systems'
        vulnerability to traffic analysis. We then suggest that, under the partial
        adversary model, if multiple packets have to be sent through these systems, more
        anonymity can be achieved if senders route the packets via different paths. This
        is in contrast to the normal technique of using the same path for them all. We
        comment on the implications of this for message-based and connection-based
        anonymity systems. We then proceed to examine the only remaining traffic analysis
        attack -- one which considers the entire system as a black box. We show that it
        is more difficult to execute than the literature suggests, and attempt to
        empirically estimate the parameters of the Mixmaster and the Mixminion systems
        needed in order to successfully execute the attack},
  www_section = {anonymity, traffic analysis},
  isbn = {978-3-540-34745-3},
  doi = {10.1007/11767831},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-serjantov.pdf},
  url = {http://www.springerlink.com/content/375x2pv385388h86/},
}
pet05-zhu
@conference{pet05-zhu,
  author = {Ye Zhu and Riccardo Bettati},
  title = {Unmixing Mix Traffic},
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2005)},
  pages = {110--127},
  year = {2005},
  month = {May},
  organization = {Springer Berlin / Heidelberg},
  publisher = {Springer Berlin / Heidelberg},
  abstract = {We apply blind source separation techniques from statistical signal
        processing to separate the traffic in a mix network. Our experiments show that
        this attack is effective and scalable. By combining the flow separation method
        and frequency spectrum matching method, a passive attacker can get the traffic
        map of the mix network. We use a non-trivial network to show that the combined
        attack works. The experiments also show that multicast traffic can be dangerous
        for anonymity networks},
  www_section = {anonymity},
  isbn = {978-3-540-34745-3},
  doi = {10.1007/11767831},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pet05-zhu.pdf},
  url = {http://www.springerlink.com/content/l5110366246k5003/},
}
pets2011-bagai
@conference{pets2011-bagai,
  title = {An Accurate System-Wide Anonymity Metric for Probabilistic Attacks}, 
  author = {Rajiv Bagai and Huabo Lu and Rong Li and Bin Tang}, 
  booktitle = {PETS'11--Proceedings of the 11th Privacy Enhancing Technologies Symposium}, 
  year = {2011}, 
  month = {July}, 
  address = {Waterloo, Canada}, 
  abstract = {We give a critical analysis of the system-wide anonymity metric of Edman et
        al. [3], which is based on the permanent value of a doubly-stochastic matrix. By
        providing an intuitive understanding of the permanent of such a matrix, we show
        that a metric that looks no further than this composite value is at best a rough
        indicator of anonymity. We identify situations where its inaccuracy is acute, and
        reveal a better anonymity indicator. Also, by constructing an
        information-preserving embedding of a smaller class of attacks into the wider
        class for which this metric was proposed, we show that this metric fails to
        possess desirable generalization properties. Finally, we present a new anonymity
        metric that does not exhibit these shortcomings. Our new metric is accurate as
        well as general}, 
  www_section = {combinatorial matrix theory, probabilistic attacks, system-wide anonymity
        metric}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PETS\%2711\%20-\%20An\%20Accurate\%20System-Wide\%20Anonymity\%20Metric\%20for\%20Probabilistic\%20Attacks.pdf},
  url = {https://bibliography.gnunet.org}, 
}
pets2011-defenestrator
@conference{pets2011-defenestrator,
  title = {DefenestraTor: Throwing out Windows in Tor}, 
  author = {Mashael AlSabah and Kevin Bauer and Ian Goldberg and Dirk Grunwald and Damon
        McCoy and Stefan Savage and Geoffrey M. Voelker}, 
  booktitle = {PETS'11--Proceedings of the 11th Privacy Enhancing Technologies Symposium}, 
  year = {2011}, 
  month = {July}, 
  address = {Waterloo, Canada}, 
  abstract = {Tor is one of the most widely used privacy enhancing technologies for
        achieving online anonymity and resisting censorship. While conventional wisdom
        dictates that the level of anonymity offered by Tor increases as its user base
        grows, the most significant obstacle to Tor adoption continues to be its slow
        performance. We seek to enhance Tor's performance by offering techniques to
        control congestion and improve flow control, thereby reducing unnecessary delays.
        To reduce congestion, we first evaluate small fixed-size circuit windows and a
        dynamic circuit window that adaptively re-sizes in response to perceived
        congestion. While these solutions improve web page response times and require
        modification only to exit routers, they generally offer poor flow control and
        slower downloads relative to Tor's current design. To improve flow control while
        reducing congestion, we implement N23, an ATM-style per-link algorithm that
        allows Tor routers to explicitly cap their queue lengths and signal congestion
        via back-pressure. Our results show that N23 offers better congestion and flow
        control, resulting in improved web page response times and faster page loads
        compared to Tor's current design and other window-based approaches. We also argue
        that our proposals do not enable any new attacks on Tor users' privacy}, 
  www_section = {congestion, DefenestraTor, online anonymity, performance, privacy
        enhancing technologies, Tor, Windows}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/PETS\%2711\%20-\%20DefenestraTor.pdf},
  url = {https://bibliography.gnunet.org}, 
}
pfitzmann85
@conference{pfitzmann85,
  title = {Networks Without User Observability -- Design Options}, 
  author = {Andreas Pfitzmann and Michael Waidner}, 
  booktitle = {Proceedings of EUROCRYPT 1985}, 
  organization = {Springer-Verlag New York, Inc}, 
  year = {1985}, 
  month = {April}, 
  address = {Linz, Austria}, 
  publisher = {Springer-Verlag New York, Inc}, 
  abstract = {In present-day communication networks, the network operator or an intruder
        could easily observe when, how much and with whom the users communicate (traffic
        analysis), even if the users employ end-to-end encryption. With the increasing
        use of ISDNs, this becomes a severe threat. Therefore, we summarize basic
        concepts to keep the recipient and sender or at least their relationship
        unobservable, consider some possible implementations and necessary hierarchical
        extensions, and propose some suitable performance and reliability enhancements}, 
  www_section = {anonymity, dining cryptographers, fault-tolerance, ISDN, mix, ring
        network, traffic analysis, user observability}, 
  isbn = {0-387-16468-5}, 
  url = {http://www.semper.org/sirene/publ/PfWa_86anonyNetze.html}, 
}
pianese:pulse
@conference{pianese:pulse,
  title = {PULSE, a Flexible P2P Live Streaming System}, 
  author = {Fabio Pianese and Joaqu{\'\i}n Keller and E W Biersack}, 
  booktitle = {INFOCOM'06. Proceedings of the 25th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2006}, 
  month = {April}, 
  address = {Barcelona, Catalunya, Spain}, 
  pages = {-1--1}, 
  publisher = {IEEE Computer Society}, 
  abstract = {With the widespread availability of inexpensive broadband Internet
        connections for home-users, a large number of bandwidth-intensive applications
        previously not feasible have now become practical. This is the case for
        multimedia live streaming, for which end-user's dial-up/ISDN modem connections
        once were the bottleneck. The bottleneck is now mostly found on the server side:
        the bandwidth required for serving many clients at once is large and thus very
        costly to the broadcasting entity. Peer-to-peer systems for on-demand and live
        streaming have proved to be an encouraging solution, since they can shift the
        burden of content distribution from the server to the users of the network. In
        this work we introduce PULSE, a P2P system for live streaming whose main goals
        are flexibility, scalability, and robustness. We present the fundamental concepts
        that stand behind the design of PULSE along with its intended global behavior,
        and describe in detail the main algorithms running on its nodes}, 
  www_section = {peer-to-peer networking, pulse}, 
  isbn = {1-4244-0221-2}, 
  doi = {10.1109/INFOCOM.2006.42}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2706\%20-\%20Pianese\%2C\%20Keller\%20\%26\%20Biersack\%20-\%20PULSE.pdf},
}
pipenet10
@booklet{pipenet10,
  title = {PipeNet 1.0}, 
  author = {Dai, Wei}, 
  year = {1998}, 
  month = {January}, 
  url = {http://weidai.com/pipenet.txt}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/http___freehaven.net_anonbib_cache_pipenet10.html_.pdf},
%%%%% ERROR: Missing field
% www_section = {?????},
}
pir
@conference{pir,
  title = {Private Information Retrieval}, 
  author = {Benny Chor and Oded Goldreich and Eyal Kushilevitz and Madhu Sudan}, 
  booktitle = {Proceedings of the IEEE Symposium on Foundations of Computer Science}, 
  organization = {ACM New York, NY, USA}, 
  year = {1995}, 
  pages = {41--50}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Publicly accessible databases are an indispensable resource for retrieving
        up-to-date information. But they also pose a significant risk to the privacy of
        the user, since a curious database operator can follow the user's queries and
        infer what the user is after. Indeed, in cases where the users' intentions are to
        be kept secret, users are often cautious about accessing the database. It can be
        shown that when accessing a single database, to completely guarantee the privacy
        of the user, the whole database should be down-loaded; namely n bits should be
        communicated (where n is the number of bits in the database).In this work, we
        investigate whether by replicating the database, more efficient solutions to the
        private retrieval problem can be obtained. We describe schemes that enable a user
        to access k replicated copies of a database (k>=2) and privately retrieve
        information stored in the database. This means that each individual server
        (holding a replicated copy of the database) gets no information on the identity
        of the item retrieved by the user. Our schemes use the replication to gain
        substantial saving. In particular, we present a two-server scheme with
        communication complexity O(n1/3)}, 
  doi = {10.1145/293347.293350}, 
  url = {http://portal.acm.org/citation.cfm?id=293347.293350}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pir.pdf}, 
  www_section = {Unsorted}, 
}
pir2014
@incollection{pir2014,
  title = {The Best of Both Worlds: Combining Information-Theoretic and Computational PIR
        for Communication Efficiency}, 
  author = {Devet, Casey and Goldberg, Ian}, 
  booktitle = {Privacy Enhancing Technologies}, 
  organization = {Springer International Publishing}, 
  volume = {8555}, 
  year = {2014}, 
  pages = {63--82}, 
  editor = {De Cristofaro, Emiliano and Murdoch, Steven J.}, 
  publisher = {Springer International Publishing}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {The goal of Private Information Retrieval (PIR) is the ability to query a
        database successfully without the operator of the database server discovering
        which record(s) of the database the querier is interested in. There are two main
        classes of PIR protocols: those that provide privacy guarantees based on the
        computational limitations of servers (CPIR) and those that rely on multiple
        servers not colluding for privacy (IT-PIR). These two classes have different
        advantages and disadvantages that make them more or less attractive to designers
        of PIR-enabled privacy enhancing technologies. We present a hybrid PIR protocol
        that combines two PIR protocols, one from each of these classes. Our protocol
        inherits many positive aspects of both classes and mitigates some of the negative
        aspects. For example, our hybrid protocol maintains partial privacy when the
        security assumptions of one of the component protocols is broken, mitigating the
        privacy loss in such an event. We have implemented our protocol as an extension
        of the Percy++ library so that it combines a PIR protocol by Aguilar Melchor and
        Gaborit with one by Goldberg. We show that our hybrid protocol uses less
        communication than either of these component protocols and that our scheme is
        particularly beneficial when the number of records in a database is large
        compared to the size of the records. This situation arises in applications such
        as TLS certificate verification, anonymous communications systems, private LDAP
        lookups, and others}, 
  isbn = {978-3-319-08505-0}, 
  doi = {10.1007/978-3-319-08506-7_4}, 
  url = {http://dx.doi.org/10.1007/978-3-319-08506-7_4}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pir_0.pdf}, 
  www_section = {Unsorted}, 
}
pitchblack
@conference{pitchblack,
  title = {Routing in the Dark: Pitch Black}, 
  author = {Nathan S Evans and Chris GauthierDickey and Christian Grothoff}, 
  booktitle = {23rd Annual Computer Security Applications Conference (ACSAC 2007)}, 
  organization = {IEEE Computer Society}, 
  year = {2007}, 
  pages = {305--314}, 
  publisher = {IEEE Computer Society}, 
  abstract = {In many networks, such as mobile ad-hoc networks and friend-to-friend overlay
        networks, direct communication between nodes is limited to specific neighbors.
        Often these networks have a small-world topology; while short paths exist between
        any pair of nodes in small-world networks, it is non-trivial to determine such
        paths with a distributed algorithm. Recently, Clarke and Sandberg proposed the
        first decentralized routing algorithm that achieves efficient routing in such
        small-world networks. This paper is the first independent security analysis of
        Clarke and Sandberg's routing algorithm. We show that a relatively weak
        participating adversary can render the overlay ineffective without being
        detected, resulting in significant data loss due to the resulting load imbalance.
        We have measured the impact of the attack in a testbed of 800 nodes using minor
        modifications to Clarke and Sandberg's implementation of their routing algorithm
        in Freenet. Our experiments show that the attack is highly effective, allowing a
        small number of malicious nodes to cause rapid loss of data on the entire
        network. We also discuss various proposed countermeasures designed to detect,
        thwart or limit the attack. While we were unable to find effective
        countermeasures, we hope that the presented analysis will be a first step towards
        the design of secure distributed routing algorithms for restricted-route
        topologies}, 
  www_section = {denial-of-service, Freenet, installation, routing}, 
  url = {http://grothoff.org/christian/pitchblack.pdf}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pitchblack.pdf}, 
}
pizzonia2008netkit
@conference{pizzonia2008netkit,
  title = {Netkit: easy emulation of complex networks on inexpensive hardware}, 
  author = {Pizzonia, Maurizio and Rimondini, Massimo}, 
  booktitle = {Proceedings of the 4th International Conference on Testbeds and research
        infrastructures for the development of networks \& communities}, 
  organization = {ICST (Institute for Computer Sciences, Social-Informatics and
        Telecommunications Engineering)}, 
  year = {2008}, 
  address = {ICST, Brussels, Belgium, Belgium}, 
  pages = {7:1--7:10}, 
  publisher = {ICST (Institute for Computer Sciences, Social-Informatics and
        Telecommunications Engineering)}, 
  series = {TridentCom '08}, 
  www_section = {network emulation, routing, user-mode Linux, virtual laboratories}, 
  isbn = {978-963-9799-24-0}, 
  url = {http://dl.acm.org/citation.cfm?id=1390576.1390585}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/a7-pizzonia.pdf}, 
}
pool-dummy04
@conference{pool-dummy04,
  title = {Reasoning about the Anonymity Provided by Pool Mixes that Generate Dummy
        Traffic}, 
  author = {Claudia Diaz and Bart Preneel}, 
  booktitle = {Proceedings of 6th Information Hiding Workshop (IH 2004)}, 
  year = {2004}, 
  month = {May}, 
  address = {Toronto}, 
  series = {LNCS}, 
  abstract = {In this paper we study the anonymity provided by generalized mixes that insert
        dummy traffic. Mixes are an essential component to offer anonymous email
        services. We indicate how to compute the recipient and sender anonymity and we
        point out some problems that may arise from the intuitive extension of the
        metric to take into account dummies. Two possible ways of inserting dummy traffic
        are discussed and compared. An active attack scenario is considered, and the
        anonymity provided by mixes under the attack is analyzed}, 
  www_section = {anonymity}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/pool-dummy04.pdf}, 
}
private_presence_service2014
@article{private_presence_service2014,
  title = {DP5: A Private Presence Service}, 
  author = {Borisov, Nikita and Danezis, George and Goldberg, Ian}, 
  journal = {Centre for Applied Cryptographic Research (CACR), University of Waterloo}, 
  year = {2014}, 
  month = {May}, 
  type = {Technical Report}, 
  abstract = {The recent NSA revelations have shown that {\textquotedblleft}address
        book{\textquotedblright} and {\textquotedblleft}buddy list{\textquotedblright}
        information are routinely targeted for mass interception. As a response to this
        threat, we present DP5, a cryptographic service that provides privacy-friendly
        indication of presence to support real-time communications. DP5 allows clients to
        register and query the online presence of their list of friends while keeping
        this list secret. Besides presence, high-integrity status updates are supported,
        to facilitate key update and rendezvous protocols. While infrastructure services
        are required for DP5 to operate, they are designed to not require any long-term
        secrets and provide perfect forward secrecy in case of compromise. We provide
        security arguments for the indistinguishability properties of the protocol, as
        well as an evaluation of its performance}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DP5\%3A\%20A\%20Private\%20Presence\%20Service.pdf},
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
proximax11
@conference{proximax11,
  title = {Proximax: Fighting Censorship With an Adaptive System for Distribution of Open
        Proxies}, 
  author = {Kirill Levchenko and Damon McCoy}, 
  booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security}, 
  year = {2011}, 
  month = {February}, 
  address = {St. Lucia}, 
  abstract = {Many people currently use proxies to circumvent government censorship that
        blocks access to content on the Internet. Unfortunately, the dissemination
        channels used to distribute proxy server locations are increasingly being
        monitored to discover and quickly block these proxies. This has given rise to a
        large number of ad hoc dissemination channels that leverage trust networks to
        reach legitimate users and at the same time prevent proxy server addresses from
        falling into the hands of censors. To address this problem in a more principled
        manner, we present Proximax, a robust system that continuously distributes pools
        of proxies to a large number of channels. The key research challenge in Proximax
        is to distribute the proxies among the different channels in a way that maximizes
        the usage of these proxies while minimizing the risk of having them blocked. This
        is challenging because of two conflicting goals: widely disseminating the
        location of the proxies to fully utilize their capacity and preventing (or at
        least delaying) their discovery by censors. We present a practical system that
        lays out a design and analytical model that balances these factors}, 
  www_section = {government censorship, Proximax, proxy}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20Proximax.pdf},
  url = {https://bibliography.gnunet.org}, 
}
pt:03:ldpc
@booklet{pt:03:ldpc,
  title = {On the Practical Use of LDPC Erasure Codes for Distributed Storage
        Applications}, 
  author = {James S. Plank and Michael G. Thomason}, 
  number = {CS-03-510}, 
  year = {2003}, 
  month = {September}, 
  publisher = {University of Tennessee}, 
  abstract = {This paper has been submitted for publication. Please see the above URL for
        current publication status. As peer-to-peer and widely distributed storage
        systems proliferate, the need to perform efficient erasure coding, instead of
        replication, is crucial to performance and efficiency. Low-Density Parity-Check
        (LDPC) codes have arisen as alternatives to standard erasure codes, such as
        Reed-Solomon codes, trading off vastly improved decoding performance for
        inefficiencies in the amount of data that must be acquired to perform decoding.
        The scores of papers written on LDPC codes typically analyze their collective and
        asymptotic behavior. Unfortunately, their practical application requires the
        generation and analysis of individual codes for finite systems. This paper
        attempts to illuminate the practical considerations of LDPC codes for
        peer-to-peer and distributed storage systems. The three main types of LDPC codes
        are detailed, and a huge variety of codes are generated, then analyzed using
        simulation. This analysis focuses on the performance of individual codes for
        finite systems, and addresses several important heretofore unanswered questions
        about employing LDPC codes in real-world systems. This material is based upon
        work supported by the National}, 
  www_section = {distributed hash table, distributed storage, LDPC, P2P}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.131.5709}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ut-cs-03-510.pdf}, 
}
publius
@conference{publius,
  title = {Publius: A robust, tamper-evident, censorship-resistant and source-anonymous web
        publishing system}, 
  author = {Marc Waldman and Aviel D. Rubin and Lorrie Cranor}, 
  booktitle = {Proceedings of the 9th USENIX Security Symposium}, 
  year = {2000}, 
  month = {August}, 
  pages = {59--72}, 
  abstract = {We describe a system that we have designed and implemented for publishing
        content on the web. Our publishing scheme has the property that it is very
        difficult for any adversary to censor or modify the content. In addition, the
        identity of the publisher is protected once the content is posted. Our system
        differs from others in that we provide tools for updating or deleting the
        published content, and users can browse the content in the normal point and click
        manner using a standard web browser and a client-side proxy that we provide. All
        of our code is freely available}, 
  url = {http://portal.acm.org/citation.cfm?id=1251311}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/publius.pdf}, 
%%%%% ERROR: Missing field
% www_section = {?????},
}
quant-adhoc
@conference{quant-adhoc,
  title = {Quantification of Anonymity for Mobile Ad Hoc Networks}, 
  author = {Marie Elisabeth Gaup Moe}, 
  booktitle = {Proceedings of the 4th International Workshop on Security and Trust
        Management (STM 08)}, 
  organization = {Elsevier Science Publishers B. V. Amsterdam, The Netherlands, The
        Netherlands}, 
  year = {2008}, 
  month = {June}, 
  address = {Trondheim, Norway}, 
  pages = {25--36}, 
  publisher = {Elsevier Science Publishers B. V. Amsterdam, The Netherlands, The
        Netherlands}, 
  abstract = {We propose a probabilistic system model for anonymous ad hoc routing
        protocols that takes into account the a priori knowledge of the adversary, and
        illustrate how the information theoretical entropy can be used for quantification
        of the anonymity offered by a routing protocol as the adversary captures an
        increasing number of nodes in the network. The proposed measurement schema is
        applied to ANODR and ARM routing protocols}, 
  www_section = {ad-hoc networks, anonymity, routing, security model}, 
  doi = {10.1016/j.entcs.2009.07.041}, 
  url = {http://portal.acm.org/citation.cfm?id=1619033}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/quant-adhoc.pdf}, 
}
rackoff93cryptographic
@conference{rackoff93cryptographic,
  title = {Cryptographic Defense Against Traffic Analysis}, 
  author = {Charles Rackoff and Daniel R. Simon}, 
  booktitle = {Proceedings of ACM Symposium on Theory of Computing}, 
  organization = {ACM New York, NY, USA}, 
  year = {1993}, 
  address = {San Diego, California, United States}, 
  pages = {672--681}, 
  publisher = {ACM New York, NY, USA}, 
  isbn = {0-89791-591-7}, 
  doi = {10.1145/167088.167260}, 
  url = {http://portal.acm.org/citation.cfm?id=167088.167260}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rackoff93cryptographic.pdf},
  www_section = {Unsorted}, 
}
randomized-checking
@conference{randomized-checking,
  title = {Making mix nets robust for electronic voting by randomized partial checking}, 
  author = {Jakobsson, Markus and Ari Juels and Ron Rivest}, 
  booktitle = {Proceedings of the 11th USENIX Security Symposium}, 
  organization = {USENIX Association Berkeley, CA, USA}, 
  year = {2002}, 
  month = {August}, 
  publisher = {USENIX Association Berkeley, CA, USA}, 
  abstract = {We propose a new technique for making mix nets robust, called randomized
        partial checking (RPC). The basic idea is that rather than providing a proof of
        completely correct operation, each server provides strong evidence of its correct
        operation by revealing a pseudo-randomly selected subset of its input/output
        relations. Randomized partial checking is exceptionally efficient compared to
        previous proposals for providing robustness; the evidence provided at each layer
        is shorter than the output of that layer, and producing the evidence is easier
        than doing the mixing. It works with mix nets based on any encryption scheme
        (i.e., on public-key alone, and on hybrid schemes using public-key/symmetric-key
        combinations). It also works both with Chaumian mix nets where the messages are
        successively encrypted with each server's key, and with mix nets based on a
        single public key with randomized re-encryption at each layer. Randomized partial
        checking is particularly well suited for voting systems, as it ensures voter
        privacy and provides assurance of correct operation. Voter privacy is ensured
        (either probabilistically or cryptographically) with appropriate design and
        parameter selection. Unlike previous work, our work provides voter privacy as a
        global property of the mix net rather than as a property ensured by a single
        honest server. RPC-based mix nets also provide high assurance of a correct
        election result, since a corrupt server is very likely to be caught if it
        attempts to tamper with even a couple of ballots}, 
  www_section = {electronic voting, public verifiability, randomized partial checking,
        shuffle network}, 
  isbn = {1-931971-00-5}, 
  url = {http://portal.acm.org/citation.cfm?id=647253.720294}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/randomized-checking.pdf},
}
rao-pseudonymity
@conference{rao-pseudonymity,
  title = {Can Pseudonymity Really Guarantee Privacy?}, 
  author = {Josyula R. Rao and Pankaj Rohatgi}, 
  booktitle = {Proceedings of the 9th USENIX Security Symposium}, 
  organization = {USENIX}, 
  year = {2000}, 
  month = {August}, 
  pages = {85--96}, 
  publisher = {USENIX}, 
  abstract = {One of the core challenges facing the Internet today is the problem of
        ensuring privacy for its users. It is believed that mechanisms such as anonymity
        and pseudonymity are essential building blocks in formulating solutions to
        address these challenges and considerable effort has been devoted towards
        realizing these primitives in practice. The focus of this effort, however, has
        mostly been on hiding explicit identity information (such as source addresses) by
        employing a combination of anonymizing proxies, cryptographic techniques to
        distribute trust among them and traffic shaping techniques to defeat traffic
        analysis. We claim that such approaches ignore a significant amount of
        identifying information about the source that leaks from the contents of web
        traffic itself. In this paper, we demonstrate the significance and value of such
        information by showing how techniques from linguistics and stylometry can use
        this information to compromise pseudonymity in several important settings. We
        discuss the severity of this problem and suggest possible countermeasures}, 
  www_section = {anonymity, pseudonym}, 
  url = {http://portal.acm.org/citation.cfm?id=1251313}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rao.pdf}, 
}
raykova-pet2008
@conference{raykova-pet2008,
  title = {PAR: Payment for Anonymous Routing}, 
  author = {Elli Androulaki and Mariana Raykova and Shreyas Srivatsan and Angelos Stavrou
        and Steven M. Bellovin}, 
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)}, 
  organization = {Springer}, 
  year = {2008}, 
  month = {July}, 
  address = {Leuven, Belgium}, 
  pages = {219--236}, 
  editor = {Borisov, Nikita and Ian Goldberg}, 
  publisher = {Springer}, 
  abstract = {Despite the growth of the Internet and the increasing concern for privacy of
        online communications, current deployments of anonymization networks depend on a
        very small set of nodes that volunteer their bandwidth. We believe that the main
        reason is not disbelief in their ability to protect anonymity, but rather the
        practical limitations in bandwidth and latency that stem from limited
        participation. This limited participation, in turn, is due to a lack of
        incentives to participate. We propose providing economic incentives, which
        historically have worked very well. In this paper, we demonstrate a payment
        scheme that can be used to compensate nodes which provide anonymity in Tor, an
        existing onion routing, anonymizing network. We show that current anonymous
        payment schemes are not suitable and introduce a hybrid payment system based on a
        combination of the Peppercoin Micropayment system and a new type of
        {\textquotedblleft}one use{\textquotedblright} electronic cash. Our system claims
        to maintain users' anonymity, although payment techniques mentioned previously --
        when adopted individually -- provably fail}, 
  www_section = {anonymity, onion routing, Tor}, 
  doi = {10.1007/978-3-540-70630-4}, 
  url = {http://www.springerlink.com/content/r1h1046823587382/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raykova-pet2008.pdf}, 
}
raymond00
@conference{raymond00,
  title = {Traffic Analysis: Protocols, Attacks, Design Issues, and Open Problems}, 
  author = {Jean-Fran{\c c}ois Raymond}, 
  booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability}, 
  organization = {Springer-Verlag, LNCS 2009}, 
  year = {2000}, 
  month = {July}, 
  pages = {10--29}, 
  publisher = {Springer-Verlag, LNCS 2009}, 
  abstract = {We present the traffic analysis problem and expose the most important
        protocols, attacks and design issues. Afterwards, we propose directions for
        further research. As we are mostly interested in efficient and practical Internet
        based protocols, most of the emphasis is placed on mix based constructions. The
        presentation is informal in that no complex definitions and proofs are presented,
        the aim being more to give a thorough introduction than to present deep new
        insights}, 
  www_section = {traffic analysis}, 
  isbn = {3-540-41724-9}, 
  doi = {10.1007/3-540-44702-4}, 
  url = {http://portal.acm.org/citation.cfm?id=371972}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/raymond00.pdf}, 
}
realtime-mix
@article{realtime-mix,
  author      = {Anja Jerichow and Jan M{\"u}ller and Andreas Pfitzmann and Birgit Pfitzmann and Michael Waidner},
  title       = {Real-Time MIXes: A Bandwidth-Efficient Anonymity Protocol},
  journal     = {IEEE Journal on Selected Areas in Communications},
  volume      = {16},
  number      = {4},
  pages       = {495--509},
  year        = {1998},
  abstract    = {We present techniques for efficient anonymous communication with real-time
    constraints as necessary for services like telephony, where a continuous data
    stream has to be transmitted. For concreteness, we present the detailed protocols
    for the narrow-band ISDN (integrated services digital network), although the
    heart of our techniques-anonymous channels-can also be applied to other networks.
    For ISDN, we achieve the same data rate as without anonymity, using the same
    subscriber lines and without any significant modifications to the long-distance
    network. A precise performance analysis is given. Our techniques are based on
    mixes, a method for anonymous communication for e-mail-like services introduced
    by D. Chaum (1981)},
  issn        = {0733-8716},
  doi         = {10.1109/49.668973},
  url         = {http://ieeexplore.ieee.org/Xplore/login.jsp?url=http\%3A\%2F\%2Fieeexplore.ieee.org\%2Fiel4\%2F49\%2F14639\%2F00668973.pdf\%3Farnumber\%3D668973\&authDecision=-203},
  www_section = {anonymity, performance analysis},
}
reardon-thesis
@mastersthesis{reardon-thesis,
  title = {Improving Tor using a TCP-over-DTLS Tunnel},
  author = {Reardon, Joel},
  school = {University of Waterloo},
  year = {2008},
  month = {September},
  type = {masters},
  abstract = {The Tor network gives anonymity to Internet users by relaying their traffic
        through the world over a variety of routers. This incurs latency, and this thesis
        first explores where this latency occurs. Experiments discount the latency
        induced by routing traffic and computational latency to determine there is a
        substantial component that is caused by delay in the communication path. We
        determine that congestion control is causing the delay. Tor multiplexes multiple
        streams of data over a single TCP connection. This is not a wise use of TCP, and
        as such results in the unfair application of congestion control. We illustrate an
        example of this occurrence on a Tor node on the live network and also illustrate
        how packet dropping and reordering cause interference between the multiplexed
        streams. Our solution is to use a TCP-over-DTLS (Datagram Transport Layer
        Security) transport between routers, and give each stream of data its own TCP
        connection. We give our design for our proposal, and details about its
        implementation. Finally, we perform experiments on our implemented version to
        illustrate that our proposal has in fact resolved the multiplexing issues
        discovered in our system performance analysis. The future work gives a number of
        steps towards optimizing and improving our work, along with some tangential ideas
        that were discovered during research. Additionally, the open-source software
        projects latency proxy and libspe, which were designed for our purposes but
        programmed for universal applicability, are discussed},
  www_section = {anonymity, latency, Tor},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reardon-thesis.pdf},
}
reed60polynomial
@article{reed60polynomial,
  title = {Polynomial codes over certain finite fields},
  author = {Reed, Irving S. and Solomon, Gustave},
  journal = {Journal of the Society for Industrial and Applied Mathematics},
  volume = {8},
  number = {2},
  year = {1960},
  month = {June},
  pages = {300--304},
  www_section = {filing-erasure-coding},
  url = {http://www.jstor.org/pss/2098968},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Reed\%20\%26\%20Solomon\%20-\%20Polynomial\%20Codes\%20Over\%20Certain\%20Finite\%20Fields.pdf},
}
regroup2006
@article{regroup2006,
  title = {Regroup-And-Go mixes to counter the (n-1) attack},
  author = {Jin-Qiao Shi and Bin-Xing Fang and Li-Jie Shao},
  journal = {Internet Research},
  volume = {16},
  number = {2},
  year = {2006},
  pages = {213--223},
  publisher = {Emerald Group Publishing Limited},
  type = {Journal},
  abstract = {The (n-1) attack is the most powerful attack against mix which is the basic
        building block of many modern anonymous systems. This paper aims to present a
        strategy that can be implemented in mix networks to detect and counter the active
        attacks, especially the (n-1) attack and its variants},
  www_section = {anonymity, mix, privacy},
  issn = {1066-2243},
  doi = {10.1108/10662240610656528},
  url = {http://www.emeraldinsight.com/Insight/viewContentItem.do;jsessionid=6C3CF32A99DF3971C2144B461C8F2CF5?contentType=Article\&hdAction=lnkpdf\&contentId=1550662},
}
reiter:ccs2004
@conference{reiter:ccs2004,
  author       = {Michael K. Reiter and XiaoFeng Wang},
  title        = {Fragile Mixing},
  booktitle    = {Proceedings of the 11th ACM Conference on Computer and Communications Security (CCS 2004)},
  organization = {ACM Press},
  publisher    = {ACM Press},
  year         = {2004},
  month        = {October},
  address      = {Washington DC, USA},
  abstract     = {No matter how well designed and engineered, a mix server offers little
    protection if its administrator can be convinced to log and selectively disclose
    correspondences between its input and output messages, either for profit or to
    cooperate with an investigation. In this paper we propose a technique, fragile
    mixing, to discourage an administrator from revealing such correspondences,
    assuming he is motivated to protect the unlinkability of other communications
    that flow through the mix (e.g., his own). Briefly, fragile mixing implements the
    property that any disclosure of an input-message-to-output-message correspondence
    discloses all such correspondences for that batch of output messages. We detail
    this technique in the context of a re-encryption mix, its integration with a mix
    network, and incentive and efficiency issues},
  isbn         = {1-58113-961-6},
  doi          = {10.1145/1030083.1030114},
  url          = {http://portal.acm.org/citation.cfm?id=1030114},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/reiter-ccs2004.pdf},
  www_section  = {anonymity, mix, privacy, unlinkability},
}
remailer-history
@article{remailer-history,
  title = {Prospects for Remailers},
  author = {Sameer Parekh},
  journal = {First Monday},
  volume = {1},
  number = {2},
  year = {1996},
  month = {August},
  abstract = {Remailers have permitted Internet users to take advantage of the medium as a
        means to communicate with others globally on sensitive issues while maintaining a
        high degree of privacy. Recent events have clearly indicated that privacy is
        increasingly at risk on the global networks. Individual efforts have, so far,
        worked well in maintaining for most Internet users a modicum of anonymity. With
        the growth of increasingly sophisticated techniques to defeat anonymity, there
        will be a need for both standards and policies to continue to make privacy on the
        Internet a priority},
  url = {http://www.firstmonday.org/issues/issue2/remailers/index.html},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Prospects\%20for\%20Remailers.pdf},
  www_section = {Unsorted},
}
rep-anon
@conference{rep-anon,
  author      = {Roger Dingledine and Nick Mathewson and Paul Syverson},
  title       = {Reputation in P2P Anonymity Systems},
  booktitle   = {Proceedings of Workshop on Economics of Peer-to-Peer Systems},
  year        = {2003},
  month       = {June},
  abstract    = {Decentralized anonymity systems tend to be unreliable, because users must
    choose nodes in the network without knowing the entire state of the network.
    Reputation systems promise to improve reliability by predicting network state. In
    this paper we focus on anonymous remailers and anonymous publishing, explain why
    the systems can benefit from reputation, and describe our experiences designing
    reputation systems for them while still ensuring anonymity. We find that in each
    example we first must redesign the underlying anonymity system to support
    verifiable transactions},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.4740},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/rep-anon.pdf},
  www_section = {anonymity, anonymous publishing, remailer, reputation},
}
reusable-channels:wpes2003
@conference{reusable-channels:wpes2003,
  title = {Reusable Anonymous Return Channels},
  author = {Golle, Philippe and Jakobsson, Markus},
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2003)},
  organization = {ACM New York, NY, USA},
  year = {2003},
  month = {October},
  address = {Washington, DC, USA},
  publisher = {ACM New York, NY, USA},
  abstract = {Mix networks are used to deliver messages anonymously to recipients, but do
        not straightforwardly allow the recipient of an anonymous message to reply to its
        sender. Yet the ability to reply one or more times, and to further reply to
        replies, is essential to a complete anonymous conversation. We propose a protocol
        that allows a sender of anonymous messages to establish a reusable anonymous
        return channel. This channel enables any recipient of one of these anonymous
        messages to send back one or more anonymous replies. Recipients who reply to
        different messages can not test whether two return channels are the same, and
        therefore can not learn whether they are replying to the same person. Yet the
        fact that multiple recipients may send multiple replies through the same return
        channel helps defend against the counting attacks that defeated earlier proposals
        for return channels. In these attacks, an adversary traces the origin of a
        message by sending a specific number of replies and observing who collects the
        same number of messages. Our scheme resists these attacks because the replies
        sent by an attacker are mixed with other replies submitted by other recipients
        through the same return channel. Moreover, our protocol straightforwardly allows
        for replies to replies, etc. Our protocol is based upon a re-encryption mix
        network, and requires four times the amount of computation and communication of a
        basic mixnet},
  www_section = {anonymity, privacy, return address},
  isbn = {1-58113-776-1},
  doi = {10.1145/1005140.1005155},
  url = {http://portal.acm.org/citation.cfm?id=1005155},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/reusable-channels-wpes2003.pdf},
}
rewebber
@article{rewebber,
  title = {TAZ servers and the rewebber network: Enabling anonymous publishing on the world
        wide web},
  author = {Ian Goldberg and David Wagner},
  journal = {First Monday},
  volume = {3},
  number = {4},
  year = {1997},
  month = {August},
  abstract = {The World Wide Web has recently matured enough to provide everyday users with
        an extremely cheap publishing mechanism. However, the current WWW architecture
        makes it fundamentally difficult to provide content without identifying yourself.
        We examine the problem of anonymous publication on the WWW, propose a design
        suitable for practical deployment, and describe our implementation. Some key
        features of our design include universal accessibility by pre-existing clients,
        short persistent names, security against social, legal, and political pressure,
        protection against abuse, and good performance},
  www_section = {anonymous publishing},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.41.4031},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.41.4031.pdf},
}
rhea2005fixing
@conference{rhea2005fixing,
  title = {Fixing the embarrassing slowness of OpenDHT on PlanetLab},
  author = {Rhea, Sean and Chun, Byung-Gon and Kubiatowicz, John and Shenker, Scott},
  booktitle = {Proc. of the Second USENIX Workshop on Real, Large Distributed Systems},
  year = {2005},
  pages = {25--30},
  www_section = {distributed hash table, openDHT, peer-to-peer, PlanetLab},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/opendht-fixing.pdf},
}
ringstwice07
@conference{ringstwice07,
  title = {Subliminal Channels in the Private Information Retrieval Protocols},
  author = {Meredith L. Patterson and Len Sassaman},
  booktitle = {Proceedings of the 28th Symposium on Information Theory in the Benelux},
  organization = {Werkgemeenschap voor Informatie- en Communicatietheorie},
  year = {2007},
  address = {Enschede, NL},
  publisher = {Werkgemeenschap voor Informatie- en Communicatietheorie},
  abstract = {Information-theoretic private information retrieval (PIR) protocols, such as
        those described by Chor et al. [5], provide a mechanism by which users can
        retrieve information from a database distributed across multiple servers in such
        a way that neither the servers nor an outside observer can determine the contents
        of the data being retrieved. More recent PIR protocols also provide protection
        against Byzantine servers, such that a user can detect when one or more servers
        have attempted to tamper with the data he has requested. In some cases (as in the
        protocols presented by Beimel and Stahl [1]), the user can still recover his data
        and protect the contents of his query if the number of Byzantine servers is below
        a certain threshold; this property is referred to as Byzantine-recovery. However,
        tampering with a user's data is not the only goal a Byzantine server might have.
        We present a scenario in which an arbitrarily sized coalition of Byzantine
        servers transforms the userbase of a PIR network into a signaling framework with
        varying levels of detectability by means of a subliminal channel [11]. We
        describe several such subliminal channel techniques, illustrate several use-cases
        for this subliminal channel, and demonstrate its applicability to a wide variety
        of PIR protocols},
  www_section = {private information retrieval},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.80.9190},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ringstwice07.pdf},
}
roca03design
@booklet{roca03design,
  author      = {Vincent Roca and Zainab Khallouf and Julien Laboure},
  title       = {Design and evaluation of a low density generator matrix},
  year        = {2003},
  abstract    = {Traditional small block Forward Error Correction (FEC) codes, like the
    Reed-Solomon erasure (RSE) code, are known to raise efficiency problems, in
    particular when they are applied to the Asynchronous Layered Coding (ALC)
    reliable multicast protocol. In this paper we describe the design of a simple
    large block Low Density Generator Matrix (LDGM) codec, a particular case of LDPC
    code, which is capable of operating on source blocks that are several tens of
    megabytes long. We also explain how the iterative decoding feature of LDGM/LDPC
    can be used to protect a large number of small independent objects during
    time-limited partially-reliable sessions. We illustrate this feature with an
    example derived from a video streaming scheme over ALC. We then evaluate our LDGM
    codec and compare its performances with a well known RSE codec. Tests focus on
    the global efficiency and on encoding/decoding performances. This paper
    deliberately skips theoretical aspects to focus on practical results. It shows
    that LDGM/LDPC open many opportunities in the area of bulk data multicasting},
  isbn        = {978-3-540-20051-2},
  doi         = {10.1007/b13249},
  url         = {http://www.springerlink.com/content/tdemq6m8b20320hb/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/ngc03_ldpc_slides_03sep18_4s.pdf},
  www_section = {ALC, FEC, large block FEC codes, LDGM, LDPC, reliable multicast},
}
rossi2012modelnet
@article{rossi2012modelnet,
  author      = {Rossi, D. and Veglia, P. and Sammarco, M. and Larroca, F.},
  title       = {ModelNet-TE: An emulation tool for the study of P2P and traffic engineering interaction dynamics},
  journal     = {Peer-to-Peer Networking and Applications},
  publisher   = {Springer},
  year        = {2012},
  pages       = {1--19},
  url         = {https://bibliography.gnunet.org},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/modelnet-si-ppna11.pdf},
  www_section = {emulation, ModelNet, P2P emulation, traffic engineering},
}
saballus07distributed
@conference{saballus07distributed,
  author      = {Bjoern Saballus and Johannes Eickhold and Thomas Fuhrmann},
  title       = {Towards a Distributed Java VM in Sensor Networks using Scalable Source Routing},
  booktitle   = {6. Fachgespraech Sensornetzwerke der GI/ITG Fachgruppe ''Kommunikation und Verteilte Systeme''},
  year        = {2007},
  address     = {Aachen, Germany},
  pages       = {47--50},
  abstract    = {One of the major drawbacks of small embedded systems such as sensor nodes is
    the need to program in a low level programming language like C or assembler. The
    resulting code is often unportable, system specific and demands deep knowledge of
    the hardware details. This paper motivates the use of Java as an alternative
    programming language. We focus on the tiny AmbiComp Virtual Machine (ACVM) which
    we currently develop as the main part of a more general Java based development
    platform for interconnected sensor nodes. This VM is designed to run on different
    small embedded devices in a distributed network. It uses the novel scalable
    source routing (SSR) algorithm to distribute and share data and workload. SSR
    provides key based routing which enables distributed hash table (DHT) structures
    as a substrate for the VM to disseminate and access remote code and objects. This
    approach allows all VMs in the network to collaborate. The result looks like one
    large, distributed VM which supports a subset of the Java language. The ACVM
    substitutes functionality of an operating system which is missing on the target
    platform. As this development is work in progress, we outline the ideas behind
    this approach to provide first insights into the upcoming problems},
  url         = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.7724},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/saballus07distributed.pdf},
  www_section = {distributed hash table, scalable source routing},
}
saballus07secure
@conference{saballus07secure,
  author       = {Bjoern Saballus and Sebastian Wallner and Markus Volkmer},
  title        = {Secure Group Communication in Ad-Hoc Networks using Tree Parity Machines},
  booktitle    = {KiVS 2007},
  organization = {VDE Verlag},
  publisher    = {VDE Verlag},
  year         = {2007},
  month        = {February},
  address      = {Bern, Switzerland},
  pages        = {457--468},
  abstract     = {A fundamental building block of secure group communication is the
    establishment of a common group key. This can be divided into key agreement and
    key distribution. Common group key agreement protocols are based on the
    Diffie-Hellman (DH) key exchange and extend it to groups. Group key distribution
    protocols are centralized approaches which make use of one or more special key
    servers. In contrast to these approaches, we present a protocol which makes use
    of the Tree Parity Machine key exchange between multiple parties. It does not
    need a centralized server and therefore is especially suitable for ad-hoc
    networks of any kind},
  isbn         = {978-3-8007-2980-7},
  url          = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.9413},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/saballus07secure.pdf},
  www_section  = {ad-hoc networks},
}
saballus08gaos
@conference{saballus08gaos,
  author       = {Bjoern Saballus and Johannes Eickhold and Thomas Fuhrmann},
  title        = {Global Accessible Objects (GAOs) in the Ambicomp Distributed Java Virtual Machine},
  booktitle    = {Proceedings of the Second International Conference on Sensor Technologies and Applications (SENSORCOMM 2008)},
  organization = {IEEE Computer Society},
  publisher    = {IEEE Computer Society},
  year         = {2008},
  address      = {Cap Esterel, France},
  abstract     = {As networked embedded sensors and actuators become more and more widespread,
    software developers encounter the difficulty to create applications that run
    distributed on these nodes: Typically, these nodes are heterogeneous,
    resource-limited, and there is no centralized control. The Ambicomp project
    tackles this problem. Its goal is to provide a distributed Java Virtual Machine
    (VM) that runs on the bare sensor node hardware. This VM creates a single system
    illusion across several nodes. Objects and threads can migrate freely between
    these nodes. In this paper, we address the problem of globally accessible
    objects. We describe how scalable source routing, a DHT-inspired routing
    protocol, can be used to allow access to objects regardless of their respective
    physical location and without any centralized component},
  url          = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/saballus08gaos.pdf},
  www_section  = {distributed hash table},
}
sandberg06distrouting
@conference{sandberg06distrouting,
  author       = {Sandberg, Oskar},
  title        = {Distributed Routing in Small-World Networks},
  booktitle    = {Algorithm Engineering and Experiments},
  organization = {SIAM},
  publisher    = {SIAM},
  year         = {2006},
  abstract     = {Theoretical basis for the routing protocol of Freenet 0.7},
  url          = {http://www.math.chalmers.se/~ossa/wrt.html},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/swroute.pdf},
  www_section  = {small-world},
}
sassaman-pet2008
@conference{sassaman-pet2008,
  title = {How to Bypass Two Anonymity Revocation Systems},
  author = {George Danezis and Len Sassaman},
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)},
  organization = {Springer},
  year = {2008},
  month = {July},
  address = {Leuven, Belgium},
  pages = {187--201},
  editor = {Borisov, Nikita and Ian Goldberg},
  publisher = {Springer},
  abstract = {In recent years, there have been several proposals for anonymous
        communication systems that provide intentional weaknesses to allow anonymity to
        be circumvented in special cases. These anonymity revocation schemes attempt to
        retain the properties of strong anonymity systems while granting a special class
        of people the ability to selectively break through their protections. We evaluate
        the two dominant classes of anonymity revocation systems, and identify
        fundamental flaws in their architecture, leading to a failure to ensure proper
        anonymity revocation, as well as introducing additional weaknesses for users not
        targeted for anonymity revocation},
  www_section = {anonymity},
  isbn = {978-3-540-70629-8},
  doi = {10.1007/978-3-540-70630-4},
  url = {http://www.springerlink.com/content/179453h161722821/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sassaman-pet2008.pdf},
}
sassaman:wpes2005
@conference{sassaman:wpes2005,
  author       = {Len Sassaman and Bram Cohen and Nick Mathewson},
  title        = {The Pynchon Gate: A Secure Method of Pseudonymous Mail Retrieval},
  booktitle    = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES 2005)},
  organization = {ACM New York, NY, USA},
  publisher    = {ACM New York, NY, USA},
  year         = {2005},
  month        = {November},
  address      = {Arlington, VA, USA},
  abstract     = {We describe the Pynchon Gate, a practical pseudonymous message retrieval
    system. Our design uses a simple distributed-trust private information retrieval
    protocol to prevent adversaries from linking recipients to their pseudonyms, even
    when some of the infrastructure has been compromised. This approach resists
    global traffic analysis significantly better than existing deployed pseudonymous
    email solutions, at the cost of additional bandwidth. We examine security
    concerns raised by our model, and propose solutions},
  isbn         = {1-59593-228-3},
  doi          = {10.1145/1102199.1102201},
  url          = {http://portal.acm.org/citation.cfm?id=1102199.1102201},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/sassaman-wpes2005.pdf},
  www_section  = {private information retrieval, pseudonym},
}
schanzen-2020
@phdthesis{schanzen-2020,
  title = {Towards Self-sovereign, decentralized personal data sharing and identity
        management},
  author = {Schanzenbach, Martin},
  school = {Technische Universit{\"a}t M{\"u}nchen},
  year = {2020},
  address = {Munich},
  type = {Dissertation},
  keywords = {DNS, GNU Name System, GNUnet, privacy, ReclaimID},
  www_section = {Self-sovereign identity, GNUnet, GNU Name System},
  www_tags = {selected},
  www_pdf_url = {http://mediatum.ub.tum.de/?id=1545514},
  url = {https://bibliography.gnunet.org},
  abstract = {Today, identity management is a key element for commercial and private
        services on the Internet. Over the past decade, digital identities evolved away
        from decentralized, pseudonymous, user-controlled personas towards centralized,
        unambiguous identities managed at and provided through service providers. This
        development was sparked by the requirement of real identities in the context of
        electronic commerce. However, it was particularly fuelled later by the emergence
        of social media and the possibilities it provides to people in order to establish
        social connections. The following centralization of identities at a handful of
        service providers significantly improved usability and reliability of identity
        services. Those benefits come at the expense of other, arguably equally important
        areas. For users, it is privacy and the permanent threat of being tracked and
        analyzed. For service providers, it is liability and the risk of facing
        significant punishment caused by strict privacy regulations which try to
        counteract the former. In this thesis, we investigate state-of-the-art approaches
        to modern identity management. We take a look at existing standards and recent
        research in order to understand the status quo and how it can be improved. As a
        result from our research, we present the following contributions: In order to
        allow users to reclaim control over their identities and personal data, we
        propose a design for a decentralized, self-sovereign directory service. This
        service allows users to share personal data with services without the need of a
        trusted third party. Unlike existing research in this area, we propose mechanisms
        which allow users to efficiently enforce access control on their data. Further,
        we investigate how trust can be established in user-managed, self-sovereign
        identities. We propose a trust establishment mechanism through the use of secure
        name systems. It allows users and organizations to establish trust relationships
        and identity assertions without the need of centralized public key
        infrastructures (PKIs). Additionally, we show how recent advancements in the area
        of non-interactive zero-knowledge (NIZK) protocols can be leveraged in order to
        create privacy-preserving attribute-based credentials (PP-ABCs) suitable for use
        in self-sovereign identity systems including our proposed directory service. We
        provide proof of concept implementations of our designs and evaluate them to show
        that they are suitable for practical applications.},
}
scheibner-thesis2014
@mastersthesis{scheibner-thesis2014,
  title = {Control Flow Analysis for Event-Driven Programs},
  author = {Florian Scheibner},
  school = {Technical University of Munich},
  year = {2014},
  month = {July},
  address = {Munich},
  pages = {0--71},
  type = {Bachelors},
  abstract = {Static analysis is often used to automatically check for common bugs in
        programs. Compilers already check for some common programming errors and issue
        warnings; however, they do not do a very deep analysis because this would slow
        the compilation of the program down. Specialized tools like Coverity or Clang
        Static Analyzer look at possible runs of a program and track the state of
        variables in respect to function calls. This information helps to identify
        possible bugs. In event driven programs like GNUnet callbacks are registered for
        later execution. Normal static analysis cannot track these function calls. This
        thesis is an attempt to extend different static analysis tools so that they can
        handle this case as well. Different solutions were thought of and executed with
        Coverity and Clang. This thesis describes the theoretical background of model
        checking and static analysis, the practical usage of wide spread static analysis
        tools, and how these tools can be extended in order to improve their usefulness},
  www_section = {event-driven, flow control, GNUnet, static analysis},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/scheibner_thesis.pdf},
  www_tags = {selected},
  url = {https://bibliography.gnunet.org},
}
sep-privacy
@incollection{sep-privacy,
  title = {Privacy},
  author = {DeCew, Judith},
  booktitle = {The Stanford Encyclopedia of Philosophy},
  year = {2013},
  edition = {Fall 2013},
  editor = {Edward N. Zalta},
  publisher = {Metaphysics Research Lab, Stanford University},
  www_section = {Unsorted},
  url = {https://bibliography.gnunet.org},
}
shimshock-pet2008
@conference{shimshock-pet2008,
  author       = {Eric Shimshock and Matt Staats and Nicholas J. Hopper},
  title        = {Breaking and Provably Fixing Minx},
  booktitle    = {Proceedings of the Eighth International Symposium on Privacy Enhancing Technologies (PETS 2008)},
  organization = {Springer},
  publisher    = {Springer},
  year         = {2008},
  month        = {July},
  address      = {Leuven, Belgium},
  pages        = {99--114},
  abstract     = {In 2004, Danezis and Laurie proposed Minx, an encryption protocol and packet
    format for relay-based anonymity schemes, such as mix networks and onion routing,
    with simplicity as a primary design goal. Danezis and Laurie argued informally
    about the security properties of Minx but left open the problem of proving its
    security. In this paper, we show that there cannot be such a proof by showing
    that an active global adversary can decrypt Minx messages in polynomial time. To
    mitigate this attack, we also prove secure a very simple modification of the Minx
    protocol},
  isbn         = {978-3-540-70629-8},
  doi          = {10.1007/978-3-540-70630-4_7},
  url          = {http://portal.acm.org/citation.cfm?id=1428259.1428266},
  www_pdf_url  = {https://git.gnunet.org/bibliography.git/plain/docs/shimshock-pet2008.pdf},
  www_section  = {attack, onion routing},
}
shsm03
@article{shsm03,
  title = {Using Caching for Browsing Anonymity}, 
  author = {Anna Shubina and Sean Smith}, 
  journal = {ACM SIGEcom Exchanges}, 
  volume = {4}, 
  number = {2}, 
  year = {2003}, 
  month = {September}, 
  pages = {11--20}, 
  abstract = {Privacy-providing tools, including tools that provide anonymity, are gaining
        popularity in the modern world. Among the goals of their users is avoiding
        tracking and profiling. While some businesses are unhappy with the growth of
        privacy-enhancing technologies, others can use lack of information about their
        users to avoid unnecessary liability and even possible harassment by parties with
        contrary business interests, and to gain a competitive market edge.Currently,
        users interested in anonymous browsing have the choice only between single-hop
        proxies and the few more complex systems that are available. These still leave
        the user vulnerable to long-term intersection attacks.In this paper, we propose a
        caching proxy system for allowing users to retrieve data from the World-Wide Web
        in a way that would provide recipient unobservability by a third party and sender
        unobservability by the recipient and thus dispose with intersection attacks, and
        report on the prototype we built using Google}, 
  www_section = {anonymity, caching proxies, privacy}, 
  doi = {10.1145/1120709.1120713}, 
  url = {http://portal.acm.org/citation.cfm?id=1120713}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shsm03.pdf}, 
}
shuffle:ccs01
@conference{shuffle:ccs01,
  title = {A Verifiable Secret Shuffle and its Application to E-Voting}, 
  author = {C. Andrew Neff}, 
  booktitle = {Proceedings of the 8th ACM Conference on Computer and Communications
        Security (CCS 2001)}, 
  organization = {ACM Press}, 
  year = {2001}, 
  month = {November}, 
  pages = {116--125}, 
  editor = {Pierangela Samarati}, 
  publisher = {ACM Press}, 
  abstract = {We present a mathematical construct which provides a cryptographic protocol
        to verifiably shuffle a sequence of k modular integers, and discuss its
        application to secure, universally verifiable, multi-authority election schemes.
        The output of the shuffle operation is another sequence of k modular integers,
        each of which is the same secret power of a corresponding input element, but the
        order of elements in the output is kept secret. Though it is a trivial matter for
        the {\textquotedblleft}shuffler{\textquotedblright} (who chooses the permutation of the elements to be applied) to
        compute the output from the input, the construction is important because it
        provides a linear size proof of correctness for the output sequence (i.e. a proof
        that it is of the form claimed) that can be checked by an arbitrary verifiers.
        The complexity of the protocol improves on that of Furukawa-Sako[16] both
        measured by number of exponentiations and by overall size.The protocol is shown
        to be honest-verifier zeroknowledge in a special case, and is computational
        zeroknowledge in general. On the way to the final result, we also construct a
        generalization of the well known Chaum-Pedersen protocol for knowledge of
        discrete logarithm equality [10], [7]. In fact, the generalization specializes
        exactly to the Chaum-Pedersen protocol in the case k = 2. This result may be of
        interest on its own.An application to electronic voting is given that matches the
        features of the best current protocols with significant efficiency improvements.
        An alternative application to electronic voting is also given that introduces an
        entirely new paradigm for achieving Universally Verifiable elections}, 
  www_section = {discrete logarithm, multi-authority}, 
  isbn = {1-58113-385-5}, 
  doi = {10.1145/501983.502000}, 
  url = {http://portal.acm.org/citation.cfm?id=502000}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/shuffle-ccs01.pdf}, 
}
slicing07
@conference{slicing07,
  title = {Information Slicing: Anonymity Using Unreliable Overlays}, 
  author = {Sachin Katti and Jeffery Cohen and Dina Katabi}, 
  booktitle = {Proceedings of the 4th USENIX Symposium on Network Systems Design and
        Implementation (NSDI)}, 
  year = {2007}, 
  month = {April}, 
  abstract = {This paper proposes a new approach to anonymous communication called
        information slicing. Typically, anonymizers use onion routing, where a message is
        encrypted in layers with the public keys of the nodes along the path. Instead,
        our approach scrambles the message, divides it into pieces, and sends the pieces
        along disjoint paths. We show that information slicing addresses message
        confidentiality as well as source and destination anonymity. Surprisingly, it
        does not need any public key cryptography. Further, our approach naturally
        addresses the problem of node failures. These characteristics make it a good fit
        for use over dynamic peer-to-peer overlays. We evaluate the anonymity
        ofinformation slicing via analysis and simulations. Our prototype implementation
        on PlanetLab shows that it achieves higher throughput than onion routing and
        effectively copes with node churn}, 
  www_section = {anonymity, onion routing, P2P, privacy}, 
  url = {http://dspace.mit.edu/handle/1721.1/36344}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/slicing07.pdf}, 
}
snader08
@conference{snader08,
  title = {A Tune-up for Tor: Improving Security and Performance in the Tor Network}, 
  author = {Robin Snader and Nikita Borisov}, 
  booktitle = {Proceedings of the Network and Distributed Security Symposium--NDSS '08}, 
  organization = {Internet Society}, 
  year = {2008}, 
  month = {February}, 
  publisher = {Internet Society}, 
  abstract = {The Tor anonymous communication network uses selfreported bandwidth values to
        select routers for building tunnels. Since tunnels are allocated in proportion to
        this bandwidth, this allows a malicious router operator to attract tunnels for
        compromise. Since the metric used is insensitive to relative load, it does not
        adequately respond to changing conditions and hence produces unreliable
        performance, driving many users away. We propose an opportunistic bandwidth
        measurement algorithm to replace selfreported values and address both of these
        problems. We also propose a mechanisms to let users tune Tor performance to
        achieve higher performance or higher anonymity. Our mechanism effectively blends
        the traffic from users of different preferences, making partitioning attacks
        difficult. We implemented the opportunistic measurement and tunable performance
        extensions and examined their performance both analytically and in the real Tor
        network. Our results show that users can get dramatic increases in either
        performance or anonymity with little to no sacrifice in the other metric, or a
        more modest improvement in both. Our mechanisms are also invulnerable to the
        previously published low-resource attacks on Tor}, 
  www_section = {anonymity, Tor}, 
  doi = {10.1109/NCM.2009.205}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.7368}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/snader08.pdf}, 
}
so64132
@article{so64132,
  title = {How robust are gossip-based communication protocols?}, 
  author = {Lorenzo Alvisi and Jeroen Doumen and Rachid Guerraoui and Boris Koldehofe and
        Harry Li and Robbert Van Renesse and Gilles Tredan}, 
  journal = {Operating Systems Review}, 
  volume = {41}, 
  number = {5}, 
  year = {2007}, 
  month = {October}, 
  pages = {14--18}, 
  publisher = {ACM}, 
  abstract = {Gossip-based communication protocols are often touted as being robust. Not
        surprisingly, such a claim relies on assumptions under which gossip protocols are
        supposed to operate. In this paper, we discuss and in some cases expose some of
        these assumptions and discuss how sensitive the robustness of gossip is to these
        assumptions. This analysis gives rise to a collection of new research
        challenges}, 
  www_section = {robustness}, 
  issn = {0163-5980}, 
  doi = {10.1145/1317379.1317383}, 
  url = {http://doc.utwente.nl/64132/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/robustgossip-final.pdf},
}
sofem05-Klonowski
@conference{sofem05-Klonowski,
  title = {Anonymous Communication with On-line and Off-line Onion Encoding}, 
  author = {Marek Klonowski and Miroslaw Kutylowski and Filip Zagorski}, 
  booktitle = {Proceedings of Conference on Current Trends in Theory and Practice of
        Informatics (SOFSEM 2005)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2005}, 
  month = {January}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Anonymous communication with onions requires that a user application
        determines the whole routing path of an onion. This scenario has certain
        disadvantages, it might be dangerous in some situations, and it does not fit well
        to the current layered architecture of dynamic communication networks. We show
        that applying encoding based on universal re-encryption can solve many of these
        problems by providing much flexibility -- the onions can be created on-the-fly or
        in advance by different parties}, 
  www_section = {onion routing, universal re-encryption}, 
  isbn = {978-3-540-24302-1}, 
  doi = {10.1007/b105088}, 
  url = {http://www.springerlink.com/content/9023b6ad0thaf51p/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sofem05-Klonowski.pdf}, 
}
space-efficient
@conference{space-efficient,
  title = {Space-Efficient Private Search}, 
  author = {George Danezis and Claudia Diaz}, 
  booktitle = {Proceedings of Financial Cryptography (FC2007)}, 
  organization = {Springer-Verlag}, 
  year = {2007}, 
  address = {Tobago}, 
  publisher = {Springer-Verlag}, 
  series = {Lecture Notes in Computer Science}, 
  abstract = {Private keyword search is a technique that allows for searching and
        retrieving documents matching certain keywords without revealing the search
        criteria. We improve the space efficiency of the Ostrovsky et al. Private Search
        [9] scheme, by describing methods that require considerably shorter buffers for
        returning the results of the search. Our basic decoding scheme recursive
        extraction, requires buffers of length less than twice the number of returned
        results and is still simple and highly efficient. Our extended decoding schemes
        rely on solving systems of simultaneous equations, and in special cases can
        uncover documents in buffers that are close to 95 \% full. Finally we note the
        similarity between our decoding techniques and the ones used to decode rateless
        codes, and show how such codes can be extracted from encrypted documents}, 
  www_section = {keywords, privacy}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.130.7014}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/privsearch-aeolus.pdf}, 
}
springerlink:10.1007/978-0-387-70992-5
@book{springerlink:10.1007/978-0-387-70992-5,
  title = {Privacy-Preserving Data Mining: Models and Algorithms}, 
  author = {Aggarwal, Charu C. and Yu, Philip S.}, 
  organization = {Springer US}, 
  volume = {34}, 
  year = {2008}, 
  publisher = {Springer US}, 
  series = {Advances in Database Systems}, 
  isbn = {978-0-387-70992-5}, 
  www_section = {Unsorted}, 
}
statistical-disclosure
@conference{statistical-disclosure,
  title = {Statistical Disclosure Attacks: Traffic Confirmation in Open Environments}, 
  author = {George Danezis}, 
  booktitle = {Proceedings of Security and Privacy in the Age of Uncertainty, (SEC2003)}, 
  organization = {IFIP TC11}, 
  year = {2003}, 
  month = {May}, 
  address = {Athens}, 
  pages = {421--426}, 
  publisher = {IFIP TC11}, 
  abstract = {An improvement over the previously known disclosure attack is presented that
        allows, using statistical methods, to effectively deanonymize users of a mix
        system. Furthermore the statistical disclosure attack is computationally
        efficient, and the conditions for it to be possible and accurate are much better
        understood. The new attack can be generalized easily to a variety of anonymity
        systems beyond mix networks}, 
  www_section = {anonymity, statistical analysis, traffic analysis}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.13.4512}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/statistical-disclosure.pdf},
}
steinbrecher:pet2003
@conference{steinbrecher:pet2003,
  title = {Modelling Unlinkability}, 
  author = {Sandra Steinbrecher and Stefan K{\"o}psell}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2003)}, 
  organization = {Springer-Verlag, LNCS 2760}, 
  year = {2003}, 
  month = {March}, 
  pages = {32--47}, 
  editor = {Roger Dingledine}, 
  publisher = {Springer-Verlag, LNCS 2760}, 
  abstract = {While there have been made several proposals to define and measure anonymity
        (e.g., with information theory, formal languages and logics) unlinkability has
        not been modelled generally and formally. In contrast to anonymity unlinkability
        is not restricted to persons. In fact the unlinkability of arbitrary items can be
        measured. In this paper we try to formalise the notion of unlinkability, give a
        refinement of anonymity definitions based on this formalisation and show the
        impact of unlinkability on anonymity. We choose information theory as a method to
        describe unlinkability because it allows an easy probabilistic description. As an
        illustration for our formalisation we describe its meaning for communication
        systems}, 
  www_section = {anonymity, unlinkability}, 
  isbn = {978-3-540-20610-1}, 
  doi = {10.1007/b94512}, 
  url = {http://www.springerlink.com/content/dxteg659uf2jtdd7/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steinbrecher-pet2003.pdf},
}
stepping-stones
@conference{stepping-stones,
  title = {Inter-Packet Delay Based Correlation for Tracing Encrypted Connections through
        Stepping Stones}, 
  author = {Xinyuan Wang and Douglas S. Reeves and S. Felix Wu}, 
  booktitle = {Proceedings of ESORICS 2002}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2002}, 
  month = {October}, 
  pages = {244--263}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Network based intrusions have become a serious threat to the users of the
        Internet. Intruders who wish to attack computers attached to the Internet
        frequently conceal their identity by staging their attacks through intermediate
        {\textquotedblleft}stepping stones{\textquotedblright}. This makes tracing the
        source of the attack substantially more difficult, particularly if the attack
        traffic is encrypted. In this paper, we address the problem of tracing encrypted
        connections through stepping stones. The incoming and outgoing connections
        through a stepping stone must be correlated to accomplish this. We propose a
        novel correlation scheme based on inter-packet timing characteristics of both
        encrypted and unencrypted connections. We show that (after some filtering)
        inter-packet delays (IPDs) of both encrypted and unencrypted, interactive
        connections are preserved across many router hops and stepping stones. The
        effectiveness of this method for correlation purposes also requires that timing
        characteristics be distinctive enough to identify connections. We have found that
        normal interactive connections such as telnet, SSH and rlogin are almost always
        distinctive enough to provide correct correlation across stepping stones. The
        number of packets needed to correctly correlate two connections is also an
        important metric, and is shown to be quite modest for this method}, 
  www_section = {inter-packet delay, tracing}, 
  isbn = {978-3-540-44345-2}, 
  doi = {10.1007/3-540-45853-0}, 
  url = {http://portal.acm.org/citation.cfm?id=699363}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/2002-08-esorics02-ipd-correlation.pdf},
}
steven-thesis
@mastersthesis{steven-thesis,
  title = {Covert channel vulnerabilities in anonymity systems}, 
  author = {Steven J. Murdoch}, 
  school = {University of Cambridge}, 
  year = {2007}, 
  month = {December}, 
  type = {{PhD} thesis}, 
  abstract = {The spread of wide-scale Internet surveillance has spurred interest in
        anonymity systems that protect users' privacy by restricting unauthorised access
        to their identity. This requirement can be considered as a flow control policy in
        the well established field of multilevel secure systems. I apply previous
        research on covert channels (unintended means to communicate in violation of a
        security policy) to analyse several anonymity systems in an innovative way. One
        application for anonymity systems is to prevent collusion in competitions. I show
        how covert channels may be exploited to violate these protections and construct
        defences against such attacks, drawing from previous covert channel research and
        collusion-resistant voting systems. In the military context, for which multilevel
        secure systems were designed, covert channels are increasingly eliminated by
        physical separation of interconnected single-role computers. Prior work on the
        remaining network covert channels has been solely based on protocol
        specifications. I examine some protocol implementations and show how the use of
        several covert channels can be detected and how channels can be modified to
        resist detection. I show how side channels (unintended information leakage) in
        anonymity networks may reveal the behaviour of users. While drawing on previous
        research on traffic analysis and covert channels, I avoid the traditional
        assumption of an omnipotent adversary. Rather, these attacks are feasible for an
        attacker with limited access to the network. The effectiveness of these
        techniques is demonstrated by experiments on a deployed anonymity network, Tor.
        Finally, I introduce novel covert and side channels which exploit thermal
        effects. Changes in temperature can be remotely induced through CPU load and
        measured by their effects on crystal clock skew. Experiments show this to be an
        effective attack against Tor. This side channel may also be usable for
        geolocation and, as a covert channel, can cross supposedly infallible air-gap
        security boundaries. This thesis demonstrates how theoretical models and generic
        methodologies relating to covert channels may be applied to find practical
        solutions to problems in real-world anonymity systems. These findings confirm the
        existing hypothesis that covert channel analysis, vulnerabilities and defences
        developed for multilevel secure systems apply equally well to anonymity systems}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.62.5142}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/steven-thesis.pdf}, 
  www_section = {Unsorted}, 
}
stop-and-go
@conference{stop-and-go,
  title = {Stop-and-Go MIXes: Providing Probabilistic Anonymity in an Open System}, 
  author = {Dogan Kesdogan and Jan Egner and Roland B{\"u}schkes}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 1998)}, 
  organization = {Springer-Verlag, LNCS 1525}, 
  year = {1998}, 
  publisher = {Springer-Verlag, LNCS 1525}, 
  abstract = {Currently known basic anonymity techniques depend on identity verification.
        If verification of user identities is not possible due to the related management
        overhead or a general lack of information (e.g. on the Internet), an adversary
        can participate several times in a communication relationship and observe the
        honest users. In this paper we focus on the problem of providing anonymity
        without identity verification. The notion of probabilistic anonymity is
        introduced. Probabilistic anonymity is based on a publicly known security
        parameter, which determines the security of the protocol. For probabilistic
        anonymity the insecurity, expressed as the probability of having only one honest
        participant, approaches 0 at an exponential rate as the security parameter is
        changed linearly. Based on our security model we propose a new MIX variant called
        {\textquotedblleft}Stop-and-Go-MIX{\textquotedblright} (SG-MIX) which provides
        anonymity without identity verification, and prove that it is probabilistically
        secure}, 
  www_section = {anonymity, identity verification, security parameter}, 
  isbn = {978-3-540-65386-8}, 
  doi = {10.1007/3-540-49380-8_7}, 
  url = {http://www.springerlink.com/content/hmfv2mgy1xqbn852/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/stop-and-go.pdf}, 
}
strint2014
@conference{strint2014,
  title = {The Internet is Broken: Idealistic Ideas for Building a GNU Network}, 
  author = {Christian Grothoff and Bartlomiej Polot and Carlo von Loesch}, 
  booktitle = {W3C/IAB Workshop on Strengthening the Internet Against Pervasive Monitoring
        (STRINT)}, 
  organization = {W3C/IAB}, 
  year = {2014}, 
  month = {February}, 
  address = {London, UK}, 
  publisher = {W3C/IAB}, 
  www_section = {Unsorted}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/strint2014.pdf}, 
  url = {https://bibliography.gnunet.org}, 
}
strong-eternity
@conference{strong-eternity,
  title = {The Strong Eternity Service}, 
  author = {Tonda Bene{\v s}}, 
  booktitle = {Proceedings of Information Hiding Workshop (IH 2001)}, 
  organization = {Springer-Verlag, LNCS 2137}, 
  year = {2001}, 
  month = {April}, 
  editor = {Ira S. Moskowitz}, 
  publisher = {Springer-Verlag, LNCS 2137}, 
  abstract = {Strong Eternity Service is a safe and very reliable storage for data of high
        importance. We show how to establish persistent pseudonyms in a totally anonymous
        environment and how to create a unique fully distributed name-space allowing both
        computer-efficient and human-acceptable access. We also present a way how to
        retrieve information from such data storage. We adapt the notion of the
        mix-network so that it can provide symmetric anonymity to both the client and the
        server. Finally we propose a system of after-the-act payments that can support
        operation of the Service without compromising anonymity}, 
  www_section = {anonymity service, distributed name-space, pseudonym}, 
  isbn = {978-3-540-42733-9}, 
  doi = {10.1007/3-540-45496-9}, 
  url = {http://portal.acm.org/citation.cfm?id=731726}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/strong-eternity.pdf}, 
}
sync-batching
@conference{sync-batching,
  title = {Synchronous Batching: From Cascades to Free Routes}, 
  author = {Roger Dingledine and Vitaly Shmatikov and Paul Syverson}, 
  booktitle = {Proceedings of Privacy Enhancing Technologies workshop (PET 2004)}, 
  volume = {3424}, 
  year = {2004}, 
  month = {May}, 
  pages = {186--206}, 
  series = {LNCS}, 
  abstract = {The variety of possible anonymity network topologies has spurred much debate
        in recent years. In a synchronous batching design, each batch of messages enters
        the mix network together, and the messages proceed in lockstep through the
        network. We show that a synchronous batching strategy can be used in various
        topologies, including a free-route network, in which senders choose paths freely,
        and a cascade network, in which senders choose from a set of fixed paths. We show
        that free-route topologies can provide better anonymity as well as better message
        reliability in the event of partial network failure}, 
  www_section = {anonymity, network topology}, 
  doi = {10.1007/b136164}, 
  url = {http://www.springerlink.com/content/uqvfwe97ehlldm8d/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/sync-batching.pdf}, 
}
syverson99
@conference{syverson99,
  title = {Group Principals and the Formalization of Anonymity}, 
  author = {Paul Syverson and Stuart Stubblebine}, 
  booktitle = {Proceedings of the World Congress on Formal Methods (1)}, 
  year = {1999}, 
  month = {January}, 
  pages = {814--833}, 
  abstract = {We introduce the concept of a group principal and present a number of
        different classes of group principals, including threshold-group-principals.
        These appear to naturally useful concepts for looking at security. We provide an
        associated epistemic language and logic and use it to reason about anonymity
        protocols and anonymity services, where protection properties are formulated from
        the intruder's knowledge of group principals. Using our language, we give an
        epistemic characterization of anonymity properties. We also present a
        specification of a simple anonymizing system using our theory}, 
  www_section = {anonymity service}, 
  isbn = {3-540-66587-0}, 
  doi = {10.1007/3-540-48119-2}, 
  url = {http://portal.acm.org/citation.cfm?id=730472}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/1999syverson-fm99.pdf}, 
}
taler2016space
@conference{taler2016space,
  title = {Enabling Secure Web Payments with GNU Taler}, 
  author = {Jeffrey Burdges and Florian Dold and Christian Grothoff and Marcello Stanisci}, 
  booktitle = {6th International Conference on Security, Privacy and Applied Cryptographic
        Engineering}, 
  organization = {Springer}, 
  year = {2016}, 
  month = {December}, 
  address = {Hyderabad}, 
  publisher = {Springer}, 
  abstract = {GNU Taler is a new electronic online payment system which provides privacy
        for customers and accountability for merchants. It uses an exchange service to
        issue digital coins using blind signatures, and is thus not subject to the
        performance issues that plague Byzantine fault-tolerant consensus-based
        solutions. The focus of this paper is addressing the challenges payment systems
        face in the context of the Web. We discuss how to address Web-specific
        challenges, such as handling bookmarks and sharing of links, as well as
        supporting users that have disabled JavaScript. Web payment systems must also
        navigate various constraints imposed by modern Web browser security architecture,
        such as same-origin policies and the separation between browser extensions and
        Web pages. While our analysis focuses on how Taler operates within the security
        infrastructure provided by the modern Web, the results partially generalize to
        other payment systems. We also include the perspective of merchants, as existing
        systems have often struggled with securing payment information at the merchant's
        side. Here, challenges include avoiding database transactions for customers that
        do not actually go through with the purchase, as well as cleanly separating
        security-critical functions of the payment system from the rest of the Web
        service}, 
  www_section = {blind signatures, GNUnet, incentives, payments, Taler, web}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/taler2016space.pdf}, 
  www_tags = {selected}, 
  url = {https://taler.net/en/bibliography.html}, 
}
tap:pet2006
@conference{tap:pet2006,
  title = {On the Security of the Tor Authentication Protocol}, 
  author = {Ian Goldberg}, 
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)}, 
  organization = {Springer}, 
  year = {2006}, 
  month = {June}, 
  address = {Cambridge, UK}, 
  pages = {316--331}, 
  editor = {George Danezis and Philippe Golle}, 
  publisher = {Springer}, 
  abstract = {Tor is a popular anonymous Internet communication system, used by an
        estimated 250,000 users to anonymously exchange over five terabytes of data per
        day. The security of Tor depends on properly authenticating nodes to clients, but
        Tor uses a custom protocol, rather than an established one, to perform this
        authentication. In this paper, we provide a formal proof of security of this
        protocol, in the random oracle model, under reasonable cryptographic
        assumptions}, 
  www_section = {Tor}, 
  isbn = {978-3-540-68790-0}, 
  doi = {10.1007/11957454}, 
  url = {http://www.springerlink.com/content/n77w19002743xu51/}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tap-pet2006.pdf}, 
}
tarzan:ccs02
@conference{tarzan:ccs02,
  title = {Tarzan: A Peer-to-Peer Anonymizing Network Layer}, 
  author = {Michael J. Freedman and Robert Morris}, 
  booktitle = {Proceedings of the 9th ACM Conference on Computer and Communications
        Security (CCS 2002)}, 
  organization = {ACM New York, NY, USA}, 
  year = {2002}, 
  month = {November}, 
  address = {Washington, DC}, 
  publisher = {ACM New York, NY, USA}, 
  abstract = {Tarzan is a peer-to-peer anonymous IP network overlay. Because it provides IP
        service, Tarzan is general-purpose and transparent to applications. Organized as
        a decentralized peer-to-peer overlay, Tarzan is fault-tolerant, highly scalable,
        and easy to manage.Tarzan achieves its anonymity with layered encryption and
        multi-hop routing, much like a Chaumian mix. A message initiator chooses a path
        of peers pseudo-randomly through a restricted topology in a way that adversaries
        cannot easily influence. Cover traffic prevents a global observer from using
        traffic analysis to identify an initiator. Protocols toward unbiased
        peer-selection offer new directions for distributing trust among untrusted
        entities.Tarzan provides anonymity to either clients or servers, without
        requiring that both participate. In both cases, Tarzan uses a network address
        translator (NAT) to bridge between Tarzan hosts and oblivious Internet
        hosts.Measurements show that Tarzan imposes minimal overhead over a corresponding
        non-anonymous overlay route}, 
  www_section = {fault-tolerance, overhead, P2P}, 
  isbn = {1-58113-612-9}, 
  doi = {10.1145/586110.586137}, 
  url = {http://portal.acm.org/citation.cfm?id=586137}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tarzan-ccs02.pdf}, 
}
tau-indy
% Gertner, Goldwasser & Malkin, RANDOM '98: information-theoretic PIR via auxiliary
% random servers, avoiding database replication.
% Fix: restored the missing space in the abstract ("each other.This poses" was run
% together, an extraction artifact). All bibliographic fields unchanged.
@conference{tau-indy,
  title = {A Random Server Model for Private Information Retrieval or How to Achieve
        Information Theoretic PIR Avoiding Database Replication},
  author = {Yael Gertner and Shafi Goldwasser and Tal Malkin},
  booktitle = {Proceedings of the Second International Workshop on Randomization and
        Approximation Techniques in Computer Science (RANDOM '98)},
  organization = {Springer-Verlag},
  year = {1998},
  address = {London, UK},
  pages = {200--217},
  publisher = {Springer-Verlag},
  abstract = {Private information retrieval (PIR) schemes provide a user with information
        from a database while keeping his query secret from the database manager. We
        propose a new model for PIR, utilizing auxiliary random servers providing privacy
        services for database access. The principal database initially engages in a
        preprocessing setup computation with the random servers, followed by the on-line
        stage with the users. Using this model we achieve the first PIR information
        theoretic solutions in which the database does not need to give away its data to
        be replicated, and with minimal on-line computation cost for the database. This
        solves privacy and efficiency problems inherent to all previous solutions.
        Specifically, in all previously existing PIR schemes the database on-line
        computation for one query is at least linear in the size of the data, and all
        previous information theoretic schemes require multiple replications of the
        database which are not allowed to communicate with each other. This poses a
        privacy problem for the database manager, who is required to hand his data to
        multiple foreign entities, and to the user, who is supposed to trust the multiple
        copies of the database not to communicate. In contrast, in our solutions no
        replication is needed, and the database manager only needs to perform O(1) amount
        of computation to answer questions of users, while all the extra computations
        required on line for privacy are done by the auxiliary random servers, who
        contain no information about the data},
  www_section = {anonymity, privacy, private information retrieval},
  isbn = {3-540-65142-X},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.6742},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.18.6742.pdf},
}
taxonomy-dummy
% Diaz & Preneel, I-NetSec 2004: analysis/taxonomy of mix designs and dummy-traffic
% policies for anonymous services. Fields appear complete per this file's conventions.
@conference{taxonomy-dummy,
  title = {Taxonomy of Mixes and Dummy Traffic},
  author = {Claudia Diaz and Bart Preneel},
  booktitle = {Proceedings of I-NetSec04: 3rd Working Conference on Privacy and Anonymity
        in Networked and Distributed Systems},
  year = {2004},
  month = {August},
  address = {Toulouse, France},
  abstract = {This paper presents an analysis of mixes and dummy traffic policies, which
        are building blocks of anonymous services. The goal of the paper is to bring
        together all the issues related to the analysis and design of mix networks. We
        discuss continuous and pool mixes, topologies for mix networks and dummy traffic
        policies. We point out the advantages and disadvantages of design decisions for
        mixes and dummy policies. Finally, we provide a list of research problems that
        need further work},
  www_section = {anonymity, dummy traffic, mix},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.5.9855},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.5.9855.pdf},
}
terminology
% Pfitzmann & Hansen: consolidated terminology proposal for anonymity research.
% Fixes: (1) supplied the www_section field that the generator flagged as missing
% (keywords taken from the terms defined in the title/abstract); (2) corrected the
% abstract typo "give a rational why" -> "give a rationale why".
@booklet{terminology,
  title = {Anonymity, Unobservability, and Pseudonymity: A Consolidated Proposal for
        Terminology},
  author = {Andreas Pfitzmann and Marit Hansen},
  year = {2000},
  month = {July},
  abstract = {Based on the nomenclature of the early papers in the field, we propose a
        terminology which is both expressive and precise. More particularly, we define
        anonymity, unlinkability, unobservability, pseudonymity (pseudonyms and digital
        pseudonyms, and their attributes), and identity management. In addition, we
        describe the relationships between these terms, give a rationale why we define
        them as we do, and sketch the main mechanisms to provide for the properties
        defined},
  www_section = {anonymity, pseudonymity, unlinkability, unobservability},
  url = {http://dud.inf.tu-dresden.de/Anon_Terminology.shtml},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/terminology.pdf},
}
thiele06debruijn
% Thiele, Kutzner & Fuhrmann, WONS 2006: churn-resistant de Bruijn networks for
% wireless on-demand systems.
% Fix: repaired the garbled abstract phrase "hampering malicious nodes from
% manipulation that data" -> "from manipulating that data". Other fields unchanged.
@conference{thiele06debruijn,
  title = {Churn Resistant de Bruijn Networks for Wireless on Demand Systems},
  author = {Manuel Thiele and Kendy Kutzner and Thomas Fuhrmann},
  booktitle = {Proceedings of the Third Annual Conference on Wireless On demand Network
        Systems and Services},
  year = {2006},
  address = {Les M{\'e}nuires, France},
  type = {publication},
  abstract = {Wireless on demand systems typically need authentication, authorization and
        accounting (AAA) services. In a peer-to-peer (P2P) environment these AAA-services
        need to be provided in a fully decentralized manner. This excludes many
        cryptographic approaches since they need and rely on a central trusted instance.
        One way to accomplish AAA in a P2P manner are de Bruijn-networks, since there
        data can be routed over multiple non-overlapping paths, thereby hampering
        malicious nodes from manipulating that data. Originally, de Bruijn-networks
        required a rather fixed network structure which made them unsuitable for wireless
        networks. In this paper we generalize de Bruijn-networks to an arbitrary number
        of nodes while keeping all their desired properties. This is achieved by
        decoupling link degree and character set of the native de Bruijn graph.
        Furthermore we describe how this makes the resulting network resistant against
        node churn},
  www_section = {authentication, P2P},
  url = {http://i30www.ira.uka.de/research/publications/p2p/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/thiele06debruijn.pdf},
}
timing-fc2004
% Levine, Reiter, Wang & Wright, FC 2004: timing attacks on low-latency mix systems
% and the "defensive dropping" countermeasure.
% Fix: supplied the www_section field that the generator flagged as missing, using
% keywords consistent with neighboring entries (cf. trickle02: "anonymity, attack").
@conference{timing-fc2004,
  title = {Timing Attacks in Low-Latency Mix-Based Systems},
  author = {Brian Neil Levine and Michael K. Reiter and Chenxi Wang and Matthew Wright},
  booktitle = {Proceedings of Financial Cryptography (FC '04)},
  organization = {Springer-Verlag, LNCS 3110},
  year = {2004},
  month = {February},
  pages = {251--265},
  editor = {Ari Juels},
  publisher = {Springer-Verlag, LNCS 3110},
  abstract = {A mix is a communication proxy that attempts to hide the correspondence
        between its incoming and outgoing messages. Timing attacks are a significant
        challenge for mix-based systems that wish to support interactive, low-latency
        applications. However, the potency of these attacks has not been studied
        carefully. In this paper, we investigate timing analysis attacks on low-latency
        mix systems and clarify the threat they pose. We propose a novel technique,
        defensive dropping, to thwart timing attacks. Through simulations and analysis,
        we show that defensive dropping can be effective against attackers who employ
        timing analysis},
  www_section = {anonymity, attack, mix},
  isbn = {978-3-540-22420-4},
  doi = {10.1007/b98935},
  url = {http://www.springerlink.com/content/n4khdtwk7dqvj0u0/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/timing-fc2004.pdf},
}
tissec-latency-leak
% Hopper, Vasserman & Chan-Tin: "How Much Anonymity does Network Latency Leak?",
% ACM Transactions on Information and System Security (TISSEC), 2010.
% NOTE(review): this @article carries an isbn (journal articles normally have an
% issn), and the doi/pages/url appear to belong to the earlier CCS 2007 conference
% version of this paper rather than the 2010 TISSEC article -- verify isbn, doi,
% pages, and month against the ACM TISSEC record before relying on them.
@article{tissec-latency-leak,
  title = {How Much Anonymity does Network Latency Leak?},
  author = {Nicholas J. Hopper and Eugene Y. Vasserman and Eric Chan-Tin},
  journal = {ACM Transactions on Information and System Security},
  year = {2010},
  month = {January},
  pages = {82--91},
  abstract = {Low-latency anonymity systems such as Tor, AN.ON, Crowds, and Anonymizer.com
        aim to provide anonymous connections that are both untraceable by "local"
        adversaries who control only a few machines, and have low enough delay to support
        anonymous use of network services like web browsing and remote login. One
        consequence of these goals is that these services leak some information about the
        network latency between the sender and one or more nodes in the system. This
        paper reports on three experiments that partially measure the extent to which
        such leakage can compromise anonymity. First, using a public dataset of pairwise
        round-trip times (RTTs) between 2000 Internet hosts, we estimate that on average,
        knowing the network location of host A and the RTT to host B leaks 3.64 bits of
        information about the network location of B. Second, we describe an attack that
        allows a pair of colluding web sites to predict, based on local timing
        information and with no additional resources, whether two connections from the
        same Tor exit node are using the same circuit with 17\% equal error rate.
        Finally, we describe an attack that allows a malicious website, with access to a
        network coordinate system and one corrupted Tor router, to recover roughly 6.8
        bits of network location per hour},
  www_section = {anonymity, latency, Tor},
  isbn = {978-1-59593-703-2},
  doi = {10.1145/1315245.1315257},
  url = {http://portal.acm.org/citation.cfm?id=1315245.1315257},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tissec-latency-leak.pdf},
}
tor-design
% Dingledine, Mathewson & Syverson, USENIX Security 2004: the Tor design paper
% (second-generation onion routing). Fields appear complete per this file's
% conventions; no page numbers were given in the source record.
@conference{tor-design,
  title = {Tor: The Second-Generation Onion Router},
  author = {Roger Dingledine and Nick Mathewson and Paul Syverson},
  booktitle = {Proceedings of the 13th USENIX Security Symposium},
  organization = {USENIX Association Berkeley, CA, USA},
  year = {2004},
  month = {August},
  publisher = {USENIX Association Berkeley, CA, USA},
  abstract = {We present Tor, a circuit-based low-latency anonymous communication service.
        This second-generation Onion Routing system addresses limitations in the original
        design by adding perfect forward secrecy, congestion control, directory servers,
        integrity checking, configurable exit policies, and a practical design for
        location-hidden services via rendezvous points. Tor works on the real-world
        Internet, requires no special privileges or kernel modifications, requires little
        synchronization or coordination between nodes, and provides a reasonable tradeoff
        between anonymity, usability, and efficiency. We briefly describe our experiences
        with an international network of more than 30 nodes. We close with a list of open
        problems in anonymous communication},
  www_section = {onion routing},
  url = {http://portal.acm.org/citation.cfm?id=1251396},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tor-design.pdf},
}
tor-soups07
% Clark, van Oorschot & Adams, SOUPS 2007: cognitive-walkthrough usability
% evaluation of Tor deployment options (Vidalia, Privoxy, Torbutton, FoxyProxy,
% TorPark). Fields appear complete per this file's conventions.
@conference{tor-soups07,
  title = {Usability of anonymous web browsing: an examination of Tor interfaces and
        deployability},
  author = {Jeremy Clark and Paul C. van Oorschot and Carlisle Adams},
  booktitle = {Proceedings of the 3rd Symposium on Usable Privacy and Security (SOUPS
        '07)},
  organization = {ACM},
  year = {2007},
  month = {July},
  address = {New York, NY, USA},
  pages = {41--51},
  publisher = {ACM},
  abstract = {Tor is a popular privacy tool designed to help achieve online anonymity by
        anonymising web traffic. Employing cognitive walkthrough as the primary method,
        this paper evaluates four competing methods of deploying Tor clients, and a
        number of software tools designed to be used in conjunction with Tor: Vidalia,
        Privoxy, Torbutton, and FoxyProxy. It also considers the standalone anonymous
        browser TorPark. Our results show that none of the deployment options are fully
        satisfactory from a usability perspective, but we offer suggestions on how to
        incorporate the best aspects of each tool. As a framework for our usability
        evaluation, we also provide a set of guidelines for Tor usability compiled and
        adapted from existing work on usable security and human-computer interaction},
  www_section = {anonymity, onion routing, privacy, Tor, usable security},
  isbn = {978-1-59593-801-5},
  doi = {10.1145/1280680.1280687},
  url = {http://portal.acm.org/citation.cfm?id=1280680.1280687},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/tor-soups07.pdf},
}
torspinISC08
% Pappas, Athanasopoulos, Ioannidis & Markatos, ISC 2008: "packet spinning" attack
% that keeps honest relays busy with looping circuits to bias path selection toward
% attacker-controlled nodes. Fields appear complete per this file's conventions.
@conference{torspinISC08,
  title = {Compromising Anonymity Using Packet Spinning},
  author = {Vasilis Pappas and Elias Athanasopoulos and Sotiris Ioannidis and Evangelos P.
        Markatos},
  booktitle = {Proceedings of the 11th Information Security Conference (ISC 2008)},
  organization = {Springer-Verlag Berlin, Heidelberg},
  year = {2008},
  month = {September},
  publisher = {Springer-Verlag Berlin, Heidelberg},
  abstract = {We present a novel attack targeting anonymizing systems. The attack involves
        placing a malicious relay node inside an anonymizing system and keeping
        legitimate nodes "busy." We achieve this by creating circular circuits and
        injecting fraudulent packets, crafted in a way that will make them spin an
        arbitrary number of times inside our artificial loops. At the same time we inject
        a small number of malicious nodes that we control into the anonymizing system. By
        keeping a significant part of the anonymizing system busy spinning useless
        packets, we increase the probability of having our nodes selected in the creation
        of legitimate circuits, since we have more free capacity to route requests than
        the legitimate nodes. This technique may lead to the compromise of the anonymity
        of people using the system. To evaluate our novel attack, we used a real-world
        anonymizing system, TOR. We show that an anonymizing system that is composed of a
        series of relay nodes which perform cryptographic operations is vulnerable to our
        packet spinning attack. Our evaluation focuses on determining the cost we can
        introduce to the legitimate nodes by injecting the fraudulent packets, and the
        time required for a malicious client to create n-length TOR circuits. Furthermore
        we prove that routers that are involved in packet spinning do not have the
        capacity to process requests for the creation of new circuits and thus users are
        forced to select our malicious nodes for routing their data streams},
  www_section = {anonymity, attack, Tor},
  isbn = {978-3-540-85884-3},
  doi = {10.1007/978-3-540-85886-7_11},
  url = {http://portal.acm.org/citation.cfm?id=1432478.1432493},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/torspinISC08.pdf},
}
torta05
% Murdoch & Danezis, IEEE S&P 2005: low-cost traffic-analysis attacks on Tor by a
% partial-view adversary.
% Fix: added the missing doi field. NOTE(review): DOI taken from the IEEE Xplore
% record for this paper (Symposium on Security and Privacy 2005); please verify.
@conference{torta05,
  title = {Low-Cost Traffic Analysis of Tor},
  author = {Steven J. Murdoch and George Danezis},
  booktitle = {Proceedings of the 2005 IEEE Symposium on Security and Privacy},
  organization = {IEEE CS},
  year = {2005},
  month = {May},
  publisher = {IEEE CS},
  abstract = {Tor is the second generation Onion Router, supporting the anonymous transport
        of TCP streams over the Internet. Its low latency makes it very suitable for
        common tasks, such as web browsing, but insecure against traffic-analysis attacks
        by a global passive adversary. We present new traffic-analysis techniques that
        allow adversaries with only a partial view of the network to infer which nodes
        are being used to relay the anonymous streams and therefore greatly reduce the
        anonymity provided by Tor. Furthermore, we show that otherwise unrelated streams
        can be linked back to the same initiator. Our attack is feasible for the
        adversary anticipated by the Tor designers. Our theoretical attacks are backed up
        by experiments performed on the deployed, albeit experimental, Tor network. Our
        techniques should also be applicable to any low latency anonymous network. These
        attacks highlight the relationship between the field of traffic-analysis and more
        traditional computer security issues, such as covert channel analysis. Our
        research also highlights that the inability to directly observe network links
        does not prevent an attacker from performing traffic-analysis: the adversary can
        use the anonymising network as an oracle to infer the traffic load on remote
        nodes in order to perform traffic-analysis},
  www_section = {anonymity, onion routing, traffic analysis},
  isbn = {0-7695-2339-0},
  doi = {10.1109/SP.2005.12},
  url = {http://portal.acm.org/citation.cfm?id=1059390},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/torta05.pdf},
}
trickle02
% Serjantov, Dingledine & Syverson, IH 2002: active attacks on several mix types by
% altering inter-mix traffic; analyzes attack cost and mitigations (dummy traffic,
% SG mixes). Fields appear complete per this file's conventions.
@conference{trickle02,
  title = {From a Trickle to a Flood: Active Attacks on Several Mix Types},
  author = {Andrei Serjantov and Roger Dingledine and Paul Syverson},
  booktitle = {Proceedings of Information Hiding Workshop (IH 2002)},
  organization = {Springer-Verlag, LNCS 2578},
  year = {2002},
  month = {October},
  editor = {Fabien Petitcolas},
  publisher = {Springer-Verlag, LNCS 2578},
  abstract = {The literature contains a variety of different mixes, some of which have been
        used in deployed anonymity systems. We explore their anonymity and message delay
        properties, and show how to mount active attacks against them by altering the
        traffic between the mixes. We show that if certain mixes are used, such attacks
        cannot destroy the anonymity of a particular message completely. We work out the
        cost of these attacks in terms of the number of messages the attacker must insert
        into the network and the time he must spend. We discuss advantages and
        disadvantages of these mixes and the settings in which their use is appropriate.
        Finally, we look at dummy traffic and SG mixes as other promising ways of
        protecting against the attacks, point out potential weaknesses in existing
        designs, and suggest improvements},
  www_section = {anonymity, attack},
  isbn = {978-3-540-00421-9},
  doi = {10.1007/3-540-36415-3},
  url = {http://www.springerlink.com/content/um0kf3dp88b0eg5v/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/trickle02.pdf},
}
troncoso-ih2007
% Troncoso, Diaz, Dunkelman & Preneel, IH 2007: traffic-analysis attacks on a
% continuously-observable steganographic file system.
% Fix: normalized address {Saint-Malo,FR} to {Saint-Malo, France}, matching this
% file's "City, Country" convention (cf. "Toulouse, France", "Leuven, Belgium").
@conference{troncoso-ih2007,
  title = {Traffic Analysis Attacks on a Continuously-Observable Steganographic File
        System},
  author = {Carmela Troncoso and Claudia Diaz and Orr Dunkelman and Bart Preneel},
  booktitle = {Proceedings of Information Hiding Workshop (IH 2007)},
  organization = {Springer-Verlag},
  volume = {4567},
  year = {2007},
  month = {June},
  address = {Saint-Malo, France},
  pages = {220--236},
  publisher = {Springer-Verlag},
  series = {Lecture Notes in Computer Science},
  abstract = {A continuously-observable steganographic file system allows to remotely store
        user files on a raw storage device; the security goal is to offer plausible
        deniability even when the raw storage device is continuously monitored by an
        attacker. Zhou, Pang and Tan have proposed such a system in [7] with a claim of
        provable security against traffic analysis. In this paper, we disprove their
        claims by presenting traffic analysis attacks on the file update algorithm of
        Zhou et al. Our attacks are highly effective in detecting file updates and
        revealing the existence and location of files. For multi-block files, we show
        that two updates are sufficient to discover the file. One-block files accessed a
        sufficient number of times can also be revealed. Our results suggest that simple
        randomization techniques are not sufficient to protect steganographic file
        systems from traffic analysis attacks},
  www_section = {traffic analysis},
  isbn = {978-3-540-77369-6},
  doi = {10.1007/978-3-540-77370-2},
  url = {http://www.springerlink.com/content/h5r4j539833k1k78/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/troncoso-ih2007.pdf},
}
troncoso-pet2008
% Troncoso, Gierlichs, Preneel & Verbauwhede, PETS 2008: Perfect Matching
% Disclosure Attack, a graph-theoretic statistical disclosure attack on mixes.
% Fix: the editor list mixed the comma form "Borisov, Nikita" with the
% "First Last" form used everywhere else in this file; normalized to the file's
% convention. Both forms parse to the same name, so rendering is unchanged.
@conference{troncoso-pet2008,
  title = {Perfect Matching Statistical Disclosure Attacks},
  author = {Carmela Troncoso and Benedikt Gierlichs and Bart Preneel and Ingrid
        Verbauwhede},
  booktitle = {Proceedings of the Eighth International Symposium on Privacy Enhancing
        Technologies (PETS 2008)},
  organization = {Springer},
  year = {2008},
  month = {July},
  address = {Leuven, Belgium},
  pages = {2--23},
  editor = {Nikita Borisov and Ian Goldberg},
  publisher = {Springer},
  abstract = {Traffic analysis is the best known approach to uncover relationships amongst
        users of anonymous communication systems, such as mix networks. Surprisingly, all
        previously published techniques require very specific user behavior to break the
        anonymity provided by mixes. At the same time, it is also well known that none of
        the considered user models reflects realistic behavior which casts some doubt on
        previous work with respect to real-life scenarios. We first present a user
        behavior model that, to the best of our knowledge, is the least restrictive
        scheme considered so far. Second, we develop the Perfect Matching Disclosure
        Attack, an efficient attack based on graph theory that operates without any
        assumption on user behavior. The attack is highly effective when de-anonymizing
        mixing rounds because it considers all users in a round at once, rather than
        single users iteratively. Furthermore, the extracted sender-receiver
        relationships can be used to enhance user profile estimations. We extensively
        study the effectiveness and efficiency of our attack and previous work when
        de-anonymizing users communicating through a threshold mix. Empirical results
        show the advantage of our proposal. We also show how the attack can be refined
        and adapted to different scenarios including pool mixes, and how precision can be
        traded in for speed, which might be desirable in certain cases},
  www_section = {mix, traffic analysis},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.147.4953},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/troncoso-pet2008.pdf},
}
turner03lightweight
% Turner & Ross, 2003: lightweight multi-currency paradigm and protocol (LCP) for a
% P2P resource market. Entered as @booklet (no publisher/venue in the source
% record); fields appear complete per this file's conventions.
@booklet{turner03lightweight,
  title = {A Lightweight Currency Paradigm for the P2P Resource Market},
  author = {David A. Turner and Keith W. Ross},
  year = {2003},
  abstract = {A P2P resource market is a market in which peers trade resources (including
        storage, bandwidth and CPU cycles) and services with each other. We propose a
        specific paradigm for a P2P resource market. This paradigm has five key
        components: (i) pairwise trading market, with peers setting their own prices for
        offered resources; (ii) multiple currency economy, in which any peer can issue
        its own currency; (iii) no legal recourse, thereby limiting the transaction costs
        in trades; (iv) a simple, secure application-layer protocol; and (v) entity
        identification based on the entity's unique public key. We argue that the
        paradigm can lead to a flourishing P2P resource market, allowing applications to
        tap into the huge pool of surplus peer resources. We illustrate the paradigm and
        its corresponding Lightweight Currency Protocol (LCP) with several application
        examples},
  www_section = {P2P},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.1309},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/LightweightParadigm.pdf},
}
usability:weis2006
% Dingledine & Mathewson, WEIS 2006: position paper on usability and network
% effects in anonymizing networks ("anonymity loves company"). Fields appear
% complete per this file's conventions.
@conference{usability:weis2006,
  title = {Anonymity Loves Company: Usability and the Network Effect},
  author = {Roger Dingledine and Nick Mathewson},
  booktitle = {Proceedings of the Fifth Workshop on the Economics of Information Security
        (WEIS 2006)},
  year = {2006},
  month = {June},
  address = {Cambridge, UK},
  editor = {Ross Anderson},
  abstract = {A growing field of literature is studying how usability impacts security [4].
        One class of security software is anonymizing networks--- overlay networks on the
        Internet that provide privacy by letting users transact (for example, fetch a web
        page or send an email) without revealing their communication partners. In this
        position paper we focus on the network effects of usability on privacy and
        security: usability is a factor as before, but the size of the user base also
        becomes a factor. We show that in anonymizing networks, even if you were smart
        enough and had enough time to use every system perfectly, you would nevertheless
        be right to choose your system based in part on its usability for other users},
  www_section = {anonymity, privacy},
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.61.510},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/usability-weis2006.pdf},
}
usenix11-pirtor
% Mittal, Olumofin, Troncoso, Borisov & Goldberg, USENIX Security 2011: PIR-Tor,
% scalable anonymous communication via private information retrieval.
% Fix: the author list mixed the comma form "Borisov, Nikita" with the
% "First Last" form used for every other name in this file; normalized to the
% file's convention. Both forms parse to the same name, so rendering is unchanged.
@conference{usenix11-pirtor,
  title = {PIR-Tor: Scalable Anonymous Communication Using Private Information Retrieval},
  author = {Prateek Mittal and Femi Olumofin and Carmela Troncoso and Nikita Borisov and
        Ian Goldberg},
  booktitle = {Proceedings of the 20th USENIX Security Symposium},
  year = {2011},
  month = {August},
  address = {San Francisco, CA, USA},
  abstract = {Existing anonymous communication systems like Tor do not scale well as they
        require all users to maintain up-to-date information about all available Tor
        relays in the system. Current proposals for scaling anonymous communication
        advocate a peer-to-peer (P2P) approach. While the P2P paradigm scales to millions
        of nodes, it provides new opportunities to compromise anonymity. In this paper,
        we step away from the P2P paradigm and advocate a client-server approach to
        scalable anonymity. We propose PIR-Tor, an architecture for the Tor network in
        which users obtain information about only a few onion routers using private
        information retrieval techniques. Obtaining information about only a few onion
        routers is the key to the scalability of our approach, while the use of private
        retrieval information techniques helps preserve client anonymity. The security of
        our architecture depends on the security of PIR schemes which are well understood
        and relatively easy to analyze, as opposed to peer-to-peer designs that require
        analyzing extremely complex and dynamic systems. In particular, we demonstrate
        that reasonable parameters of our architecture provide equivalent security to
        that of the Tor network. Moreover, our experimental results show that the
        overhead of PIR-Tor is manageable even when the Tor network scales by two orders
        of magnitude},
  www_section = {anonymous communication, peer to peer, PIR-Tor},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/USENIX\%20-\%20PIR-Tor.pdf},
  url = {https://bibliography.gnunet.org},
}
usenix11-telex
% Wustrow, Wolchok, Goldberg & Halderman, USENIX Security 2011: Telex,
% tag-based anticensorship deployed at friendly ISPs that diverts specially
% tagged TLS flows. Fields appear complete per this file's conventions; the
% url is the bibliography's own placeholder, as used by neighboring entries.
@conference{usenix11-telex,
  title = {Telex: Anticensorship in the Network Infrastructure},
  author = {Eric Wustrow and Scott Wolchok and Ian Goldberg and J. Alex Halderman},
  booktitle = {Proceedings of the 20th USENIX Security Symposium},
  year = {2011},
  month = {August},
  address = {San Francisco, CA, USA},
  abstract = {In this paper, we present Telex, a new approach to resisting state-level
        Internet censorship. Rather than attempting to win the cat-and-mouse game of
        finding open proxies, we leverage censors' unwillingness to completely block
        day-to-day Internet access. In effect, Telex converts innocuous, unblocked
        websites into proxies, without their explicit collaboration. We envision that
        friendly ISPs would deploy Telex stations on paths between censors' networks and
        popular, uncensored Internet destinations. Telex stations would monitor seemingly
        innocuous flows for a special {\textquotedblleft}tag{\textquotedblright} and
        transparently divert them to a forbidden website or service instead. We propose a
        new cryptographic scheme based on elliptic curves for tagging TLS handshakes such
        that the tag is visible to a Telex station but not to a censor. In addition, we
        use our tagging scheme to build a protocol that allows clients to connect to
        Telex stations while resisting both passive and active attacks. We also present a
        proof-of-concept implementation that demonstrates the feasibility of our system},
  www_section = {anticensorship, network infrastructure state-level censorship, proxy,
        telex},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/Telex\%3A\%20Anticensorship\%20in\%20the\%20Network\%20Infrastructure.pdf},
  url = {https://bibliography.gnunet.org},
}
valet:pet2006
% Overlier & Syverson, PET 2006: "valet services" improving hidden servers'
% DoS resistance and hiding service existence, not just location. Fields appear
% complete per this file's conventions.
@conference{valet:pet2006,
  title = {Valet Services: Improving Hidden Servers with a Personal Touch},
  author = {Lasse {\O}verlier and Paul Syverson},
  booktitle = {Proceedings of the Sixth Workshop on Privacy Enhancing Technologies (PET
        2006)},
  organization = {Springer},
  year = {2006},
  month = {June},
  address = {Cambridge, UK},
  pages = {223--244},
  editor = {George Danezis and Philippe Golle},
  publisher = {Springer},
  abstract = {Location hidden services have received increasing attention as a means to
        resist censorship and protect the identity of service operators. Research and
        vulnerability analysis to date has mainly focused on how to locate the hidden
        service. But while the hiding techniques have improved, almost no progress has
        been made in increasing the resistance against DoS attacks directly or indirectly
        on hidden services. In this paper we suggest improvements that should be easy to
        adopt within the existing hidden service design, improvements that will both
        reduce vulnerability to DoS attacks and add QoS as a service option. In addition
        we show how to hide not just the location but the existence of the hidden service
        from everyone but the users knowing its service address. Not even the public
        directory servers will know how a private hidden service can be contacted, or
        know it exists},
  www_section = {censorship resistance, information hiding},
  isbn = {978-3-540-68790-0},
  doi = {10.1007/11957454},
  url = {http://www.springerlink.com/content/d58607007777r8l1/},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/valet-pet2006.pdf},
}
vleroythesis
% Leroy, "Distributing social applications", IRISA, 2010.
% Fix: the original entry was @mastersthesis but carried type = {phd}, which
% contradict each other. Per the entry's own type field this is a PhD thesis,
% so the entry type is corrected to @phdthesis (whose default label is already
% "PhD thesis") and the now-redundant type override is dropped. The citation
% key and all other fields are unchanged.
@phdthesis{vleroythesis,
  title = {Distributing social applications},
  author = {Vincent Leroy},
  school = {IRISA},
  year = {2010},
  month = {December},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/DistributingSocialApp2010Leroy.pdf},
  www_section = {Unsorted},
  url = {https://bibliography.gnunet.org},
}
vrancx:decentralized
@article{vrancx:decentralized,
  title = {Decentralized Learning in Markov Games}, 
  author = {Peter Vrancx and Katja Verbeeck and Ann Now{\'e}}, 
  journal = {IEEE Transactions on Systems, Man, and Cybernetics, Part B}, 
  volume = {38}, 
  year = {2008}, 
  month = {August}, 
  pages = {976--981}, 
  abstract = {Learning automata (LA) were recently shown to be valuable tools for designing
        multiagent reinforcement learning algorithms. One of the principal contributions
        of the LA theory is that a set of decentralized independent LA is able to control
        a finite Markov chain with unknown transition probabilities and rewards. In this
        paper, we propose to extend this algorithm to Markov games-a straightforward
        extension of single-agent Markov decision problems to distributed multiagent
        decision problems. We show that under the same ergodic assumptions of the
        original theorem, the extended algorithm will converge to a pure equilibrium
        point between agent policies}, 
  www_section = {algorithms, decentralized learning, LA, learning automata}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/IEEE\%20Transactions\%20on\%20Systems\%20-\%20Descentralized\%20learning.pdf},
}
wagner
@article{wagner,
  title = {Don't Shoot the Messenger: Limiting the Liability of Anonymous Remailers}, 
  author = {Robyn Wagner}, 
  journal = {New Mexico Law Review}, 
  volume = {32}, 
  number = {Winter}, 
  year = {2002}, 
  pages = {99--142}, 
  abstract = {I will close the remailer for the time being because the legal issues
        concerning the Internet in Finland are yet undefined. The legal protection of the
        users needs to be clarified. At the moment the privacy of Internet messages is
        judicially unclear. I have also personally been a target because of the remailer.
        Unjustified accusations affect both my job and my private life}, 
  www_section = {privacy}, 
  url = {https://litigation-essentials.lexisnexis.com/webcd/app?action=DocumentDisplay\&crawlid=1\&doctype=cite\&docid=32+N.M.L.+Rev.+99\&srctype=smi\&srcid=3B15\&key=008c465fa13eb62c9370e4baa5eea0e5},
}
waldman01tangler
@conference{waldman01tangler,
  title = {Tangler: a censorship-resistant publishing system based on document
        entanglements}, 
  author = {Marc Waldman and David Mazi{\`e}res}, 
  booktitle = {Proceedings of the 8th ACM Conference on Computer and Communications
        Security (CCS 2001)}, 
  year = {2001}, 
  month = {November}, 
  pages = {126--135}, 
  abstract = {We describe the design of a censorship-resistant system that employs a unique
        document storage mechanism. Newly published documents are dependent on the blocks
        of previously published documents. We call this dependency an entanglement.
        Entanglement makes replication of previously published content an intrinsic part
        of the publication process. Groups of files, called collections, can be published
        together and named in a host-independent manner. Individual documents within a
        collection can be securely updated in such a way that future readers of the
        collection see and tamper-check the updates. The system employs a self-policing
        network of servers designed to eject non-compliant servers and prevent them from
        doing more harm than good}, 
  www_section = {censorship resistance, host-independent, self-policing network}, 
  isbn = {1-58113-385-5}, 
  doi = {10.1145/501983.502002}, 
  url = {http://portal.acm.org/citation.cfm?id=501983.502002\&coll=GUIDE\&dl=GUIDE\&type=series\&idx=SERIES320\&part=series\&WantType=Proceedings\&title=CCS\&CFID=75729899\&CFTOKEN=36385677},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.24.3781.pdf}, 
}
wallach02p2psecurity
@conference{wallach02p2psecurity,
  title = {A Survey of Peer-to-Peer Security Issues}, 
  author = {Dan S. Wallach}, 
  booktitle = {ISSS}, 
  year = {2002}, 
  pages = {42--57}, 
  abstract = {Peer-to-peer (p2p) networking technologies have gained popularity as a
        mechanism for users to share files without the need for centralized servers. A
        p2p network provides a scalable and fault-tolerant mechanism to locate nodes
        anywhere on a network without maintaining a large amount of routing state. This
        allows for a variety of applications beyond simple file sharing. Examples include
        multicast systems, anonymous communications systems, and web caches. We survey
        security issues that occur in the underlying p2p routing protocols, as well as
        fairness and trust issues that occur in file sharing and other p2p applications.
        We discuss how techniques, ranging from cryptography, to random network probing,
        to economic incentives, can be used to address these problems}, 
  www_section = {cryptography, P2P, routing, security policy}, 
  url = {http://springerlink.metapress.com/openurl.asp?genre=article\&issn=0302-9743\&volume=2609\&spage=42},
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.84.9197.pdf}, 
}
wang:market-driven
@conference{wang:market-driven,
  title = {Market-driven bandwidth allocation in selfish overlay networks}, 
  author = {Weihong Wang and Baochun Li}, 
  booktitle = {INFOCOM'05. Proceedings of the 24th IEEE International Conference on
        Computer Communications}, 
  organization = {IEEE Computer Society}, 
  year = {2005}, 
  month = {March}, 
  address = {Miami, FL, USA}, 
  pages = {2578--2589}, 
  publisher = {IEEE Computer Society}, 
  abstract = {Selfish overlay networks consist of autonomous nodes that develop their own
        strategies by optimizing towards their local objectives and self-interests,
        rather than following prescribed protocols. It is thus important to regulate the
        behavior of selfish nodes, so that system-wide properties are optimized. In this
        paper, we investigate the problem of bandwidth allocation in overlay networks,
        and propose to use a market-driven approach to regulate the behavior of selfish
        nodes that either provide or consume services. In such markets, consumers of
        services select the best service providers, taking into account both the
        performance and the price of the service. On the other hand, service providers
        are encouraged to strategically decide their respective prices in a pricing game,
        in order to maximize their economic revenues and minimize losses in the long run.
        In order to overcome the limitations of previous models towards similar
        objectives, we design a decentralized algorithm that uses reinforcement learning
        to help selfish nodes to incrementally adapt to the local market, and to make
        optimized strategic decisions based on past experiences. We have simulated our
        proposed algorithm in randomly generated overlay networks, and have shown that
        the behavior of selfish nodes converges to their optimal strategies, and resource
        allocations in the entire overlay are near-optimal, and efficiently adapts to the
        dynamics of overlay networks}, 
  www_section = {bandwidth allocation, economics, market-driven, prescribed protocol,
        selfish overlay network}, 
  isbn = {0-7803-8968-9}, 
  doi = {10.1109/INFCOM.2005.1498542}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/INFOCOM\%2705\%20-\%20Market-driven\%20bandwidth\%20allocation.pdf},
}
warta04-Klonowski
@conference{warta04-Klonowski,
  title = {Universal Re-encryption of Signatures and Controlling Anonymous Information
        Flow}, 
  author = {Marek Klonowski and Miroslaw Kutylowski and Anna Lauks and Filip Zagorski}, 
  booktitle = {Proceedings of WARTACRYPT '04}, 
  year = {2004}, 
  month = {July}, 
  abstract = {Anonymous communication protocols, very essential for preserving privacy of
        the parties communicating, may lead to severe problems. A malicious server may
        use anonymous communication protocols for injecting unwelcome messages into the
        system so that their source can be hardly traced. So anonymity and privacy
        protection on one side and protection against such phenomena as spam are so far
        contradictory goals. We propose a mechanism that may be used to limit the
        mentioned side effects of privacy protection. During the protocol proposed each
        encrypted message admitted into the system is signed by a respective authority.
        Then, on its route through the network the encrypted message and the signature
        are re-encrypted universally. The purpose of universal re-encryption is to hide
        the routes of the messages from an observer monitoring the traffic. Despite
        re-encryption, signature of the authority remains valid. Depending on a
        particular application, verification of the signature is possible either off-line
        by anybody with the access to the ciphertext and the signature or requires
        contact with the authority that has issued the signature}, 
  www_section = {anonymity, information hiding, privacy, re-encryption}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.108.4976}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.108.4976.pdf}, 
}
web-mix:pet2000
@conference{web-mix:pet2000,
  title = {Web MIXes: A system for anonymous and unobservable Internet access}, 
  author = {Oliver Berthold and Hannes Federrath and Stefan K{\"o}psell}, 
  booktitle = {Proceedings of Designing Privacy Enhancing Technologies: Workshop on Design
        Issues in Anonymity and Unobservability}, 
  organization = {Springer-Verlag, LNCS 2009}, 
  year = {2000}, 
  month = {July}, 
  pages = {115--129}, 
  publisher = {Springer-Verlag, LNCS 2009}, 
  abstract = {We present the architecture, design issues and functions of a MIX-based
        system for anonymous and unobservable real-time Internet access. This system
        prevents traffic analysis as well as flooding attacks. The core technologies
        include an adaptive, anonymous, time/volume-sliced channel mechanism and a
        ticket-based authentication mechanism. The system also provides an interface to
        inform anonymous users about their level of anonymity and unobservability}, 
  www_section = {anonymity, traffic analysis}, 
  isbn = {978-3-540-41724-8}, 
  doi = {10.1007/3-540-44702-4}, 
  url = {http://portal.acm.org/citation.cfm?id=371983}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/web-mix-pet2000.pdf}, 
}
wiangsripanawan-acsw07
@conference{wiangsripanawan-acsw07,
  title = {Design principles for low latency anonymous network systems secure against
        timing attacks}, 
  author = {Rungrat Wiangsripanawan and Willy Susilo and Rei Safavi-Naini}, 
  booktitle = {Proceedings of the fifth Australasian symposium on ACSW frontiers (ACSW
        '07)}, 
  organization = {Australian Computer Society, Inc}, 
  year = {2007}, 
  address = {Darlinghurst, Australia}, 
  pages = {183--191}, 
  publisher = {Australian Computer Society, Inc}, 
  abstract = {Low latency anonymous network systems, such as Tor, were considered secure
        against timing attacks when the threat model does not include a global adversary.
        In this threat model the adversary can only see part of the links in the system.
        In a recent paper entitled Low-cost traffic analysis of Tor, it was shown that a
        variant of timing attack that does not require a global adversary can be applied
        to Tor. More importantly, authors claimed that their attack would work on any low
        latency anonymous network systems. The implication of the attack is that all low
        latency anonymous networks will be vulnerable to this attack even if there is no
        global adversary. In this paper, we investigate this claim against other low
        latency anonymous networks, including Tarzan and Morphmix. Our results show that
        in contrast to the claim of the aforementioned paper, the attack may not be
        applicable in all cases. Based on our analysis, we draw design principles for
        secure low latency anonymous network system (also secure against the above
        attack)}, 
  www_section = {anonymity, latency, Morphmix, Tarzan, timing attack, Tor}, 
  isbn = {1-920-68285-X}, 
  url = {http://portal.acm.org/citation.cfm?id=1274553}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wiangsripanawan-acsw07.pdf},
}
wisa04-Klonowski
@conference{wisa04-Klonowski,
  title = {Anonymous Communication with On-line and Off-line Onion Encoding}, 
  author = {Marcin Gomulkiewicz and Marek Klonowski and Miroslaw Kutylowski}, 
  booktitle = {Proceedings of Workshop on Information Security Applications (WISA 2004)}, 
  organization = {Springer Berlin / Heidelberg}, 
  year = {2004}, 
  month = {August}, 
  publisher = {Springer Berlin / Heidelberg}, 
  abstract = {Encapsulating messages in onions is one of the major techniques providing
        anonymous communication in computer networks. To some extent, it provides
        security against traffic analysis by a passive adversary. However, it can be
        highly vulnerable to attacks by an active adversary. For instance, the adversary
        may perform a simple so--called repetitive attack: a malicious server sends the
        same message twice, then the adversary traces places where the same message
        appears twice -- revealing the route of the original message. A repetitive attack
        was examined for mix--networks. However, none of the countermeasures designed is
        suitable for onion--routing. In this paper we propose an
        {\textquotedblleft}onion-like{\textquotedblright} encoding design based on
        universal reencryption. The onions constructed in this way can be used in a
        protocol that achieves the same goals as the classical onions, however, at the
        same time we achieve immunity against a repetitive attack. Even if an adversary
        disturbs communication and prevents processing a message somewhere on the onion
        path, it is easy to identify the malicious server performing the attack and
        provide an evidence of its illegal behavior}, 
  www_section = {onion routing, repetitive attack, universal re-encryption, unlinkability}, 
  isbn = {978-3-540-24302-1}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wisa04-Klonowski.pdf}, 
}
wk11-malice-vs-anon
@conference{wk11-malice-vs-anon,
  title = {Malice versus AN.ON: Possible Risks of Missing Replay and Integrity Protection}, 
  author = {Benedikt Westermann and Dogan Kesdogan}, 
  booktitle = {FC'11--Proceedings of Financial Cryptography and Data Security}, 
  year = {2011}, 
  month = {February}, 
  address = {St. Lucia}, 
  abstract = {In this paper we investigate the impact of missing replay protection as well
        as missing integrity protection concerning a local attacker in AN.ON. AN.ON is a
        low latency anonymity network mostly used to anonymize web traffic. We
        demonstrate that both protection mechanisms are important by presenting two
        attacks that become feasible as soon as the mechanisms are missing. We mount both
        attacks on the AN.ON network which neither implements replay protection nor
        integrity protection yet}, 
  www_section = {AN.ON, anonymity network, integrity protection, replay protection}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/FC\%2711\%20-\%20Malice\%20versus\%20AN.ON_.pdf},
  url = {https://bibliography.gnunet.org}, 
}
wpes06:heydt-benjamin
@conference{wpes06:heydt-benjamin,
  title = {Nonesuch: a mix network with sender unobservability}, 
  author = {Thomas S. Heydt-Benjamin and Andrei Serjantov and Benessa Defend}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2006)}, 
  organization = {ACM Press}, 
  year = {2006}, 
  address = {New York, NY, USA}, 
  pages = {1--8}, 
  publisher = {ACM Press}, 
  abstract = {Oblivious submission to anonymity systems is a process by which a message may
        be submitted in such a way that neither the anonymity network nor a global
        passive adversary may determine that a valid message has been sent. We present
        Nonesuch: a mix network with steganographic submission and probabilistic
        identification and attenuation of cover traffic. In our system messages are
        submitted as stegotext hidden inside Usenet postings. The steganographic
        extraction mechanism is such that the vast majority of the Usenet postings
        which do not contain keyed stegotext will produce meaningless output which serves
        as cover traffic, thus increasing the anonymity of the real messages. This cover
        traffic is subject to probabilistic attenuation in which nodes have only a small
        probability of distinguishing cover messages from "real" messages. This
        attenuation prevents cover traffic from travelling through the network in an
        infinite loop, while making it infeasible for an entrance node to distinguish
        senders}, 
  www_section = {oblivious circuits, public key cryptography, steganography,
        unobservability}, 
  isbn = {1-59593-556-8}, 
  doi = {10.1145/1179601.1179603}, 
  url = {http://portal.acm.org/citation.cfm?id=1179601.1179603}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes06-heydt-benjamin.pdf},
}
wpes09-bridge-attack
@conference{wpes09-bridge-attack,
  title = {On the risks of serving whenever you surf: Vulnerabilities in Tor's blocking
        resistance design}, 
  author = {Jon McLachlan and Nicholas J. Hopper}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2009)}, 
  organization = {ACM}, 
  year = {2009}, 
  month = {November}, 
  publisher = {ACM}, 
  abstract = {In Tor, a bridge is a client node that volunteers to help censored users
        access Tor by serving as an unlisted, first-hop relay. Since bridging is
        voluntary, the success of this circumvention mechanism depends critically on the
        willingness of clients to act as bridges. We identify three key architectural
        shortcomings of the bridge design: (1) bridges are easy to find; (2) a bridge
        always accepts connections when its operator is using Tor; and (3) traffic to and
        from clients connected to a bridge interferes with traffic to and from the bridge
        operator. These shortcomings lead to an attack that can expose the IP address of
        bridge operators visiting certain web sites over Tor. We also discuss mitigation
        mechanisms}, 
  www_section = {blocking resistance}, 
  isbn = {978-1-60558-783-7}, 
  doi = {10.1145/1655188.1655193}, 
  url = {http://portal.acm.org/citation.cfm?id=1655193}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-bridge-attack.pdf},
}
wpes09-dht-attack
@conference{wpes09-dht-attack,
  title = {Hashing it out in public: Common failure modes of DHT-based anonymity schemes}, 
  author = {Andrew Tran and Nicholas J. Hopper and Yongdae Kim}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2009)}, 
  organization = {ACM}, 
  year = {2009}, 
  month = {November}, 
  publisher = {ACM}, 
  abstract = {We examine peer-to-peer anonymous communication systems that use Distributed
        Hash Table algorithms for relay selection. We show that common design flaws in
        these schemes lead to highly effective attacks against the anonymity provided by
        the schemes. These attacks stem from attacks on DHT routing, and are not
        mitigated by the well-known DHT security mechanisms due to a fundamental mismatch
        between the security requirements of DHT routing's put/get functionality and
        anonymous routing's relay selection functionality. Our attacks essentially allow
        an adversary that controls only a small fraction of the relays to function as a
        global active adversary. We apply these attacks in more detail to two schemes:
        Salsa and Cashmere. In the case of Salsa, we show that an attacker that controls
        10\% of the relays in a network of size 10,000 can compromise more than 80\% of
        all completed circuits; and in the case of Cashmere, we show that an attacker
        that controls 20\% of the relays in a network of size 64000 can compromise 42\%
        of the circuits}, 
  www_section = {anonymity, denial-of-service, P2P}, 
  isbn = {978-1-60558-783-7}, 
  doi = {10.1145/1655188.1655199}, 
  url = {http://portal.acm.org/citation.cfm?id=1655188.1655199}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-dht-attack.pdf}, 
}
wpes09-xpay
@conference{wpes09-xpay,
  title = {XPay: Practical anonymous payments for Tor routing and other networked
        services}, 
  author = {Yao Chen and Radu Sion and Bogdan Carbunar}, 
  booktitle = {Proceedings of the Workshop on Privacy in the Electronic Society (WPES
        2009)}, 
  organization = {ACM}, 
  year = {2009}, 
  month = {November}, 
  publisher = {ACM}, 
  abstract = {We design and analyze the first practical anonymous payment mechanisms for
        network services. We start by reporting on our experience with the implementation
        of a routing micropayment solution for Tor. We then propose micropayment
        protocols of increasingly complex requirements for networked services, such as
        P2P or cloud-hosted services. The solutions are efficient, with bandwidth and
        latency overheads of under 4\% and 0.9 ms respectively (in ORPay for Tor),
        provide full anonymity (both for payers and payees), and support thousands of
        transactions per second}, 
  www_section = {anonymity, onion routing, payment, privacy}, 
  isbn = {978-1-60558-783-7}, 
  doi = {10.1145/1655188.1655195}, 
  url = {http://portal.acm.org/citation.cfm?id=1655188.1655195}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wpes09-xpay.pdf}, 
}
wpes11-bridgespa
@conference{wpes11-bridgespa,
  title = {BridgeSPA: Improving Tor Bridges with Single Packet Authorization}, 
  author = {Rob Smits and Divam Jain and Sarah Pidcock and Ian Goldberg and Urs
        Hengartner}, 
  booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society}, 
  organization = {ACM}, 
  year = {2011}, 
  month = {October}, 
  address = {Chicago, IL, United States}, 
  publisher = {ACM}, 
  abstract = {Tor is a network designed for low-latency anonymous communications. Tor
        clients form circuits through relays that are listed in a public directory, and
        then relay their encrypted traffic through these circuits. This indirection makes
        it difficult for a local adversary to determine with whom a particular Tor user
        is communicating. In response, some local adversaries restrict access to Tor by
        blocking each of the publicly listed relays. To deal with such an adversary, Tor
        uses bridges, which are unlisted relays that can be used as alternative entry
        points into the Tor network. Unfortunately, issues with Tor's bridge
        implementation make it easy to discover large numbers of bridges. An adversary
        that hoards this information may use it to determine when each bridge is online
        over time. If a bridge operator also browses with Tor on the same machine, this
        information may be sufficient to deanonymize him. We present BridgeSPA as a
        method to mitigate this issue. A client using BridgeSPA relies on innocuous
        single packet authorization (SPA) to present a time-limited key to a bridge.
        Before this authorization takes place, the bridge will not reveal whether it is
        online. We have implemented BridgeSPA as a working proof-of-concept, which is
        available under an open-source licence}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20bridgeSPA.pdf},
  www_section = {Unsorted}, 
  url = {https://bibliography.gnunet.org}, 
}
wpes11-faust
@conference{wpes11-faust,
  title = {FAUST: Efficient, TTP-Free Abuse Prevention by Anonymous Whitelisting}, 
  author = {Peter Lofgren and Nicholas J. Hopper}, 
  booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society}, 
  organization = {ACM}, 
  year = {2011}, 
  month = {October}, 
  address = {Chicago, IL, United States}, 
  publisher = {ACM}, 
  abstract = {We introduce Faust, a solution to the {\textquotedblleft}anonymous
        blacklisting problem:{\textquotedblright} allow an anonymous user to prove that
        she is authorized to access an online service such that if the user misbehaves,
        she retains her anonymity but will be unable to authenticate in future sessions.
        Faust uses no trusted third parties and is one to two orders of magnitude more
        efficient than previous schemes without trusted third parties. The key idea
        behind Faust is to eliminate the explicit blacklist used in all previous
        approaches, and rely instead on an implicit whitelist, based on blinded
        authentication tokens}, 
  www_section = {anonymous authentication, anonymous blacklisting, privacy-enhancing
        revocation}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20FAUST.pdf},
  url = {https://bibliography.gnunet.org}, 
}
wpes11-panchenko
@conference{wpes11-panchenko,
  title = {Website Fingerprinting in Onion Routing Based Anonymization Networks}, 
  author = {Andriy Panchenko and Lukas Niessen and Andreas Zinnen and Thomas Engel}, 
  booktitle = {WPES'11--Proceedings of the Workshop on Privacy in the Electronic Society}, 
  organization = {ACM}, 
  year = {2011}, 
  month = {October}, 
  address = {Chicago, IL, United States}, 
  publisher = {ACM}, 
  abstract = {Low-latency anonymization networks such as Tor and JAP claim to hide the
        recipient and the content of communications from a local observer, i.e., an
        entity that can eavesdrop the traffic between the user and the first
        anonymization node. Especially users in totalitarian regimes strongly depend on
        such networks to freely communicate. For these people, anonymity is particularly
        important and an analysis of the anonymization methods against various attacks is
        necessary to ensure adequate protection. In this paper we show that anonymity in
        Tor and JAP is not as strong as expected so far and cannot resist website
        fingerprinting attacks under certain circumstances. We first define features for
        website fingerprinting solely based on volume, time, and direction of the
        traffic. As a result, the subsequent classification becomes much easier. We apply
        support vector machines with the introduced features. We are able to improve
        recognition results of existing works on a given state-of-the-art dataset in Tor
        from 3\% to 55\% and in JAP from 20\% to 80\%. The datasets assume a closed-world
        with 775 websites only. In a next step, we transfer our findings to a more
        complex and realistic open-world scenario, i.e., recognition of several websites
        in a set of thousands of random unknown websites. To the best of our knowledge,
        this work is the first successful attack in the open-world scenario. We achieve a
        surprisingly high true positive rate of up to 73\% for a false positive rate of
        0.05\%. Finally, we show preliminary results of a proof-of-concept implementation
        that applies camouflage as a countermeasure to hamper the fingerprinting attack.
        For JAP, the detection rate decreases from 80\% to 4\% and for Tor it drops from
        55\% to about 3\%}, 
  www_section = {anonymous communication, pattern recognition, privacy, traffic analysis,
        website fingerprinting}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/WPES\%2711\%20-\%20Fingerprinting.pdf},
  url = {https://bibliography.gnunet.org}, 
}
wright02
@conference{wright02,
  title = {An Analysis of the Degradation of Anonymous Protocols}, 
  author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}, 
  booktitle = {Proceedings of the Network and Distributed Security Symposium--NDSS '02}, 
  organization = {IEEE}, 
  year = {2002}, 
  month = {February}, 
  publisher = {IEEE}, 
  abstract = {There have been a number of protocols proposed for anonymous network
        communication. In this paper we investigate attacks by corrupt group members that
        degrade the anonymity of each protocol over time. We prove that when a particular
        initiator continues communication with a particular responder across path
        reformations, existing protocols are subject to the attack. We use this result to
        place an upper bound on how long existing protocols, including Crowds, Onion
        Routing, Hordes, Web Mixes, and DC-Net, can maintain anonymity in the face of the
        attacks described. Our results show that fully-connected DC-Net is the most
        resilient to these attacks, but it suffers from scalability issues that keep
        anonymity group sizes small. Additionally, we show how violating an assumption of
        the attack allows malicious users to setup other participants to falsely appear
        to be the initiator of a connection}, 
  www_section = {anonymity, Crowds, dining cryptographers, Hordes, onion routing}, 
  url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.9435}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright-degrade.pdf}, 
}
wright03
@conference{wright03,
  title = {Defending Anonymous Communication Against Passive Logging Attacks}, 
  author = {Matthew Wright and Micah Adler and Brian Neil Levine and Clay Shields}, 
  booktitle = {Proceedings of the 2003 IEEE Symposium on Security and Privacy}, 
  organization = {IEEE Computer Society Washington, DC, USA}, 
  year = {2003}, 
  month = {May}, 
  pages = {28--43}, 
  publisher = {IEEE Computer Society Washington, DC, USA}, 
  abstract = {We study the threat that passive logging attacks pose to anonymous
        communications. Previous work analyzed these attacks under limiting
        assumptions. We first describe a possible defense that comes from breaking the
        assumption of uniformly random path selection. Our analysis shows that the
        defense improves anonymity in the static model, where nodes stay in the system,
        but fails in a dynamic model, in which nodes leave and join. Additionally, we
        use the dynamic model to show that the intersection attack creates a
        vulnerability in certain peer-to-peer systems for anonymous communications. We
        present simulation results that show that attack times are significantly lower
        in practice than the upper bounds given by previous work. To determine whether
        users' web traffic has communication patterns required by the attacks, we
        collected and analyzed the web requests of users. We found that, for our study,
        frequent and repeated communication to the same web site is common}, 
  www_section = {attack, P2P}, 
  isbn = {0-7695-1940-7}, 
  url = {http://portal.acm.org/citation.cfm?id=830556}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/wright-passive.pdf ,
        https://git.gnunet.org/bibliography.git/plain/docs/wright-passive2.pdf}, 
}
xor-trees
@article{xor-trees,
  title = {{Xor}-trees for efficient anonymous multicast and reception}, 
  author = {Shlomi Dolev and Rafail Ostrovsky}, 
  journal = {ACM Transactions on Information and System Security}, 
  volume = {3}, 
  number = {2}, 
  year = {2000}, 
  address = {New York, NY, USA}, 
  pages = {63--84}, 
  publisher = {ACM Press}, 
  abstract = {In this work we examine the problem of efficient anonymous broadcast and
        reception in general communication networks. We show an algorithm which achieves
        anonymous communication with O(1) amortized communication complexity on each link
        and low computational complexity. In contrast, all previous solutions require
        polynomial (in the size of the network and security parameter) amortized
        communication complexity}, 
  www_section = {anonymity, anonymous multicast, communication complexity}, 
  issn = {1094-9224}, 
  doi = {10.1145/354876.354877}, 
  url = {http://portal.acm.org/citation.cfm?id=354876.354877}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/10.1.1.73.6464.pdf}, 
}
xrs2016
@mastersthesis{xrs2016,
  title = {{GNUnet} und Informationsmacht: Analyse einer {P2P}-Technologie und ihrer sozialen
        Wirkung}, 
  author = {Christian Ricardo K{\"u}hne}, 
  school = {Humboldt-Universit{\"a}t zu Berlin}, 
  year = {2016}, 
  month = apr, 
  address = {Berlin}, 
  pages = {0--103}, 
  type = {Diplomarbeit}, 
  abstract = {This thesis studies the GNUnet project comprising its history, ideas and the
        P2P network technology. It specifically investigates the question of emancipatory
        potentials with regard to forms of information power due to a widely deployed new
        Internet technology and tries to identify essential suspensions of power within
        the scope of an impact assessment. Moreover, we will see by contrasting the
        GNUnet project with the critical data protection project, founded on social
        theory, that both are heavily concerned about the problem of illegitimate and
        unrestrained information power, giving us additional insights for the assessment.
        Last but not least I'll try to present a scheme of how both approaches may interact
        to realize their goals}, 
  www_section = {GNUnet, peer-to-peer}, 
  www_pdf_url = {https://git.gnunet.org/bibliography.git/plain/docs/xrs2016.pdf}, 
  www_tags = {selected}, 
  url = {https://bibliography.gnunet.org}, 
}



Go to top