% publications-hd-2010-2019.bib
@inproceedings{bernard2012cross,
  author       = {Bernard, Catherine and Debar, Herv{\'e} and Benayoune, Salim},
  title        = {Cross-domain vulnerabilities over social networks},
  booktitle    = {Proceedings of the 4th International Conference on Computational
                  Aspects of Social Networks ({CASoN}'12)},
  pages        = {8--13},
  year         = 2012,
  month        = nov,
  location     = {Sao Carlos, Brazil},
  organization = {IEEE},
  doi          = {10.1109/CASoN.2012.6412370},
  url          = {http://hal.archives-ouvertes.fr/hal-00813344},
  keywords     = {Internet, computer crime, social networking (online),
                  unsolicited e-mail, video signal processing, video streaming,
                  Adobe Flash, Facebook, Twitter, YouTube, cross-domain
                  vulnerabilities, phishing vectors, social networks, spam,
                  video sharing site, video streaming application, video
                  traffic, Browsers, security, servers, streaming media,
                  cross-domain attack, Flash security, social network security},
  abstract     = {Recent years have seen a tremendous growth of social networks
                  such as Facebook and Twitter. At the same time, the share of
                  video traffic in the Internet has also significantly
                  increased, and the two functions are getting closer to one
                  another. YouTube, the most famous video sharing site, allows
                  people to comment on videos with other people while Facebook
                  and Twitter are important vectors into sharing videos. Both
                  video channels and social networks are increasingly
                  vulnerable attack targets. For example, social networks are
                  also considerable spam and phishing vectors, and Adobe Flash
                  as the premier video streaming application is associated with
                  numerous software vulnerabilities. This is a good way for
                  attackers to compromise sites with embedded Flash objects. In
                  this paper, we present the technical background of the
                  cross-domain mechanisms and the security implications.
                  Several recent studies have demonstrated the weakness of the
                  cross-domain policy, leading to session hijacking or the
                  leakage of sensitive information. Current solutions to detect
                  these vulnerabilities use a client-side approach. The purpose
                  of our work is to present a new approach based on network
                  flows analysis to detect malicious behavior.}
}
@inproceedings{carlinet2010caching,
  author       = {Carlinet, Yannick and Debar, Herv{\'e} and Gourhant, Yvon and
                  M{\'e}, Ludovic},
  title        = {Caching {P2P} Traffic: What are the Benefits for an {ISP}?},
  booktitle    = {Proceedings of the Ninth International Conference on Networks
                  ({ICN}'2010)},
  pages        = {376--383},
  year         = 2010,
  month        = apr,
  organization = {IEEE},
  doi          = {10.1109/ICN.2010.67},
  keywords     = {cache storage, peer-to-peer computing, telecommunication
                  traffic, ISP, P2P overlay, P2P traffic, cache performances,
                  peer-to-peer traffic, Bandwidth, Costs, IP networks,
                  Large-scale systems, Monitoring, Peer to peer computing,
                  Performance evaluation, Telecommunication traffic, Traffic
                  control, Cache, traffic modeling, traffic monitoring},
  abstract     = {Caching P2P content could be an effective way of alleviating
                  network usage, thereby reducing operation costs for
                  operators. However, cache performances are affected by a lot
                  of factors related to the cache itself, but also to the
                  properties of the P2P overlay, the P2P contents, and the
                  cache deployment location. In order to evaluate the potential
                  usefulness of P2P caches, we have setup a large-scale
                  experiment for monitoring traffic of a popular P2P
                  application in the operational network of France Telecom.
                  After gaining some insights on the characteristics of the P2P
                  traffic, we simulate a cache based on data contained in our
                  trace. We are then able to evaluate cache performances in
                  terms of the bandwidth that could have been saved if a cache
                  was really in the network during our measurements.
                  Additionally, the paper studies the impact of various factors
                  on performance, such as cache properties and more importantly
                  the number of customers served by the cache. The results show
                  that the bandwidth gain can reach 23\% of P2P traffic with a
                  passive cache.}
}
@inproceedings{carlinet2010evaluation,
  author       = {Carlinet, Yannick and M{\'e}, Ludovic and Gourhant, Yvon and
                  Debar, Herv{\'e}},
  title        = {Evaluation of {P4P} Based on Real Traffic Measurement},
  booktitle    = {Proceedings of the Fifth International Conference on Internet
                  Monitoring and Protection ({ICIMP} 2010)},
  pages        = {129--134},
  year         = 2010,
  month        = may,
  organization = {IEEE},
  doi          = {10.1109/ICIMP.2010.25},
  url          = {http://hal-supelec.archives-ouvertes.fr/hal-00534571},
  keywords     = {peer-to-peer computing, P2P traffic, P4P evaluation, network
                  infrastructure, peer-to-peer traffic, real traffic
                  measurement, system testing, P2P, P4P, Peer-to-peer, traffic
                  monitoring, traffic optimization, transit/peering points},
  abstract     = {Peer-To-Peer (P2P) traffic represents a significant
                  proportion of the traffic today. However, currently
                  widespread P2P systems take no account of the underlying
                  network infrastructure. But by doing so, they would be able
                  to perform a much more efficient peer selection. The P4P
                  approach aims at allowing ISPs to cooperate with peers in
                  order to improve their peer selection in the overlay. The
                  objectives of this paper are to assess the benefits of P4P,
                  both for the P2P system and the ISP, thanks to an experiment
                  of a scale never achieved before, there by complementing the
                  results obtained in previous work. The results show that P2P
                  applications need more information than just the Internet
                  domain, in order to improve their performance, and that the
                  inter-domain P2P traffic can be reduced by at least 72\%.}
}
@inproceedings{cuppens2010negotiation,
  author        = {Cuppens, Nora and Cuppens, Fr{\'e}d{\'e}ric and
                   Abi Haidar, Diala and Debar, Herv{\'e}},
  title         = {Negotiation of Prohibition: An Approach Based on Policy
                   Rewriting},
  booktitle     = {Proceedings of the {IFIP TC11} 23rd International
                   Information Security Conference ({SEC}'08)},
  pages         = {173--187},
  year          = 2008,
  isbn          = {978-0-387-09698-8},
  volume        = 278,
  series        = {{IFIP} -- The International Federation for Information
                   Processing},
  editor        = {Jajodia, Sushil and Samarati, Pierangela and Cimato,
                   Stelvio},
  doi           = {10.1007/978-0-387-09699-5_12},
  keywords      = {security policies, OrBAC, ABAC, access control, access
                   control models, negotiation, policy negotiation, access
                   negotiation},
  internal-note = {Citation key says 2010 but the entry year is 2008; the
                   abstract also looks truncated -- verify against the
                   published version.},
  abstract      = {Traditionally, access control is enforced by centralized
                   stand-alone architectures. In this case, the access
                   controller knows all information necessary to evaluate the
                   access control policy. As a consequence, when a subject
                   sends a query to the access controller, this access
                   controller does not need to interact with this subject to
                   decide if this query must be accepted or rejected.}
}
@inproceedings{dassouki2013tcp,
  author       = {Dassouki, Khaled and Debar, Herv{\'e} and Safa, Haidar and
                  Hijazi, Abbas},
  title        = {A {TCP} delay-based mechanism for detecting congestion in the
                  {Internet}},
  booktitle    = {Proceedings of the Third International Conference on
                  Communications and Information Technology ({ICCIT} 2013)},
  pages        = {141--145},
  year         = 2013,
  month        = jun,
  location     = {Beirut, Lebanon},
  organization = {IEEE},
  doi          = {10.1109/ICCITechnology.2013.6579538},
  url          = {http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=6579538&isnumber=6579508},
  keywords     = {Internet, telecommunication links, transport protocols,
                  Internet congestion, TCP delay-based mechanism, active queue
                  management algorithms, aggregation link, buffer statistics,
                  Algorithm design and analysis, Delays, Estimation,
                  Information technology, Monitoring, Active Queue Management,
                  Congestion Detection, TCP, throughput},
  abstract     = {Internet congestion existing solutions such as active queue
                  management algorithms have many shortcomings, mainly related
                  to the detection phase. These algorithms depend on routers'
                  buffer statistics to detect congestion and their performance
                  is highly affected by the environment and the parameters that
                  are used. In this paper we are proposing a mechanism that is
                  capable of detecting congestions by monitoring passively an
                  aggregation link. The proposed mechanism does not need
                  parameterizations since all the used parameters are deduced
                  from public real internet traces using statistical approaches
                  and uses TCP delays as a detection parameter. It is dynamic
                  since the detection is proportional to the severity of the
                  congestion. Experimental results have shown that the proposed
                  mechanism is able to detect congestion rapidly and does not
                  suffer from false alarms.}
}
@inproceedings{debar2010service,
  author    = {Debar, Herv{\'e} and Kheir, Nizar and Cuppens-Boulahia, Nora and
               Cuppens, Fr{\'e}d{\'e}ric},
  title     = {Service dependencies in information systems security},
  booktitle = {Proceedings of the 5th International Conference on Mathematical
               Methods, Models and Architectures for Computer Network Security
               ({MMM-ACNS}'10)},
  pages     = {1--20},
  year      = 2010,
  isbn      = {978-3-642-14705-0},
  location  = {St. Petersburg, Russia},
  volume    = 6258,
  series    = {Lecture Notes in Computer Science},
  editor    = {Kotenko, Igor and Skormin, Victor},
  doi       = {10.1007/978-3-642-14706-7_1},
  numpages  = 20,
  url       = {http://dl.acm.org/citation.cfm?id=1885194.1885196},
  acmid     = 1885196,
  publisher = {Springer-Verlag},
  address   = {Berlin, Heidelberg},
  keywords  = {intrusion detection, counter-measures, intrusion response,
               service dependencies},
  abstract  = {In the complex world of information services, we are realizing
               that system dependencies upon one another have not only
               operational implications but also security implications. These
               security implications are multifold. Beyond allowing an attacker
               to propagate over an information system by leveraging stepping
               stones vulnerabilities, it also allows a defender to select the
               most interesting enforcement points for its policies, overall
               reducing the cost of managing the security of these complex
               systems. In this paper, we present a dependency model that has
               been designed for the purpose of providing security operators
               with a quantitative decision support system for deploying and
               managing security policies.}
}
@inproceedings{granadillo2012combination,
  author       = {Granadillo, Gustavo Gonzalez and Jacob, Gr{\'e}goire and
                  Debar, Herv{\'e} and Coppolino, Luigi},
  title        = {Combination approach to select optimal countermeasures based
                  on the {RORI} index},
  booktitle    = {Proceedings of the Second International Conference on
                  Innovative Computing Technology ({INTECH}'2012)},
  pages        = {38--45},
  year         = 2012,
  month        = sep,
  location     = {Casablanca, Morocco},
  organization = {IEEE},
  doi          = {10.1109/INTECH.2012.6457801},
  url          = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6457801},
  keywords     = {Internet, security of data, RORI index, appropriate attack
                  mitigation, combination approach, complex attack effect
                  mitigation, computer attacks, cost-effective attack
                  mitigation, cost-effectiveness ratio maximization, critical
                  infrastructure process control, individual security
                  solutions, optimal countermeasure selection,
                  return-on-response investment index, Equations, Indexes,
                  Integrated circuits, Investments, Mathematical model, Process
                  control, Security},
  abstract     = {As new and more sophisticated computer attacks appear across
                  the Internet, sometimes with unknown dimensions and
                  criticality, the implementation of individual security
                  solutions become less effective and in some cases useless.
                  Instead, a combined approach is required to guarantee an
                  appropriate and cost-effective mitigation of such attacks.
                  Most of the current work suggests the deployment of multiple
                  countermeasures as a single treatment to mitigate the effects
                  of complex attacks. However, the methodology to analyze and
                  evaluate combined solutions is either hardly explained or
                  very complicated to implement. This paper, therefore proposes
                  a simple and well-structured approach to select the optimal
                  combination of countermeasures by maximizing the
                  cost-effectiveness ratio of the countermeasures, this ratio
                  being measured by the Return on Response Investment (RORI)
                  index. A case study is provided at the end of the document to
                  show the applicability of the model over a critical
                  infrastructure process control.}
}
@inproceedings{granadillo2012individual,
  author    = {Granadillo, Gustavo Gonzalez and Debar, Herv{\'e} and Jacob,
               Gr{\'e}goire and Gaber, Chrystel and Achemlal, Mohamed and
               others},
  title     = {Individual countermeasure selection based on the return on
               response investment index},
  booktitle = {Proceedings of the Sixth International Conference on
               Mathematical Methods, Models, and Architectures for Computer
               Network Security ({MMM-ACNS}'12)},
  pages     = {156--170},
  year      = 2012,
  month     = oct,
  location  = {St Petersburg, Russia},
  isbn      = {978-3-642-33703-1},
  volume    = 7531,
  series    = {Lecture Notes in Computer Science},
  editor    = {Kotenko, Igor and Skormin, Victor},
  doi       = {10.1007/978-3-642-33704-8_14},
  url       = {http://link.springer.com/chapter/10.1007%2F978-3-642-33704-8_14},
  publisher = {Springer Berlin Heidelberg},
  keywords  = {intrusion response, SIEM},
  abstract  = {As the number of attacks, and thus the number of alerts received
               by Security Information and Event Management Systems (SIEMs)
               increases, the need for appropriate treatment of these alerts
               has become essential. The new generation of SIEMs focuses on the
               response ability to automate the process of selecting and
               deploying countermeasures. However, current response systems
               select and deploy security measures without performing a
               comprehensive impact analysis of attacks and response scenarios.
               This paper addresses this limitation by proposing a model for
               the automated selection of optimal security countermeasures. In
               addition, the paper compares previous mathematical models and
               studies their limitations, which lead to the creation of a new
               model that evaluates, ranks and selects optimal countermeasures.
               The model relies on the optimization of cost sensitive metrics
               based on the Return On Response Investment (RORI) index. The
               optimization compares the expected impact of the attacks when
               doing nothing with the expected impact after applying
               countermeasures. A case study of a real infrastructure is
               deployed at the end of the document to show the applicability of
               the model over a Mobile Money Transfer Service.}
}
@incollection{granadillo2012ontology,
  author    = {Granadillo, Gustavo Gonzalez and Mustapha, Yosra Ben and Hachem,
               Nabil and Debar, Herv{\'e}},
  title     = {An ontology-based model for {SIEM} environments},
  booktitle = {Global Security, Safety and Sustainability \& e-Democracy},
  pages     = {148--155},
  year      = 2012,
  publisher = {Springer Berlin Heidelberg},
  isbn      = {978-3-642-33447-4},
  volume    = 99,
  series    = {Lecture Notes of the Institute for Computer Sciences, Social
               Informatics and Telecommunications Engineering},
  editor    = {Georgiadis, Christos K. and Jahankhani, Hamid and Pimenidis,
               Elias and Bashroush, Rabih and Al-Nemrat, Ameer},
  doi       = {10.1007/978-3-642-33448-1_21},
  url       = {http://link.springer.com/chapter/10.1007%2F978-3-642-33448-1_21},
  keywords  = {SIEM, ontology, data model},
  abstract  = {The management of security events, from the analysis of attacks
               and risk to the selection of appropriate countermeasures, has
               become a major concern for security analysts and IT
               administrators. Furthermore, network and system devices are
               designed to be heterogeneous, with different characteristics and
               functionalities that increase the difficulty of these tasks.
               This paper introduces an ontology-driven approach to address the
               aforementioned problems. The proposed model takes into account
               the two main aspects of this field, the information that is
               manipulated by SIEM environments and the operations that are
               applied to this information, in order to reach the desired
               goals. We present a case study on Botnets to illustrate the
               utilization of our model.}
}
@article{granadillo2012ontology-swrl,
  author    = {Granadillo, Gustavo Gonzalez and Mustapha, Yosra Ben and Hachem,
               Nabil and Debar, Herv{\'e}},
  title     = {An ontology-driven approach to model {SIEM} information and
               operations using the {SWRL} formalism},
  journal   = {International Journal of Electronic Security and Digital
               Forensics},
  volume    = 4,
  number    = 2,
  pages     = {104--123},
  year      = 2012,
  publisher = {Inderscience},
  doi       = {10.1504/IJESDF.2012.048412},
  url       = {http://www.inderscience.com/info/inarticle.php?artid=48412},
  keywords  = {security events, event management, security information, SIEM,
               ontology, SWRL, semantics, logic rules, data modelling,
               heterogeneity, reasoning, risk assessment, countermeasures,
               botnets},
  abstract  = {The management of security events, from the risk analysis to the
               selection of appropriate countermeasures, has become a major
               concern for security analysts and IT administrators.
               Furthermore, the fact that network and system devices are
               heterogeneous, increases the difficulty of these administrative
               tasks. This paper introduces an ontology-driven approach to
               address the aforementioned problems. The proposed model takes
               into account two aspects: the information and the operations
               that are manipulated by SIEM environments in order to reach the
               desired goals. The model uses ontologies to provide simplicity
               on the description of concepts, relationships and instances of
               the security domain. The semantics web rule languages are used
               to describe the logic rules needed to infer relationships among
               individuals and classes. A case study on Botnets is presented at
               the end of this paper to illustrate a concrete utilisation of
               our model.}
}
@article{granadillo2013rori,
  author        = {Granadillo, Gustavo Gonzalez and Belhaouane, Malek and
                   Debar, Herv{\'e} and Jacob, Gr{\'e}goire},
  title         = {{RORI}-based countermeasure selection using the {OrBAC}
                   formalism},
  journal       = {International Journal of Information Security},
  publisher     = {Springer Berlin Heidelberg},
  year          = 2014,
  issn          = {1615-5262},
  volume        = 13,
  number        = 1,
  pages         = {63--79},
  doi           = {10.1007/s10207-013-0207-8},
  url           = {http://link.springer.com/article/10.1007%2Fs10207-013-0207-8},
  keywords      = {Countermeasure selection, Impact analysis, Combination
                   approach, Risk mitigation, Surface coverage, RORI index,
                   OrBAC model},
  internal-note = {Citation key says 2013 but the entry year is 2014 (DOI is a
                   2013 online-first) -- verify.},
  abstract      = {Attacks against information systems have grown in
                   sophistication and complexity, making the detection and
                   reaction process a challenging task for security
                   administrators. In reaction to these attacks, the definition
                   of security policies is an effective way to protect
                   information systems from further damages, but it requires a
                   great expertise and knowledge. If stronger security policies
                   can constitute powerful countermeasures, inappropriate
                   policies, on the other hand, may result in disastrous
                   consequences for the organization. The implementation of
                   stronger security policies requires in many cases the
                   evaluation and analysis of multiple countermeasures. Current
                   research promotes the implementation of multiple
                   countermeasures as a strategy to react over complex attacks;
                   however, the methodology is either hardly explained or very
                   complicated to implement. This paper introduces a
                   well-structured approach to evaluate and select optimal
                   countermeasures based on the return on response investment
                   (RORI) index. An implementation of a real case study is
                   provided at the end of the document to show the
                   applicability of the model over a mobile money transfer
                   service. The service, security policies and countermeasures
                   are expressed using the OrBAC formalism.}
}
@inproceedings{hachem2011botnets,
  author       = {Hachem, Nabil and Ben Mustapha, Yosra and Granadillo, Gustavo
                  Gonzalez and Debar, Herv{\'e}},
  title        = {Botnets: Lifecycle and Taxonomy},
  booktitle    = {Proceedings of the 2011 Conference on Network and Information
                  Systems Security ({SAR-SSI} 2011)},
  pages        = {1--8},
  year         = 2011,
  month        = may,
  location     = {La Rochelle, France},
  organization = {IEEE},
  doi          = {10.1109/SAR-SSI.2011.5931395},
  url          = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=5931395},
  keywords     = {Internet, computer crime, computer network security, computer
                  viruses, fraud, DDoS attack, Zombies, agents, attack phase,
                  attacker, botnet, infected computer, large-scale coordinated
                  attack, life cycle, malware, phishing campaign, resilience
                  technique, spam, threat, Computers, Malware, Protocols,
                  Servers, Topology},
  abstract     = {The new threat of the Internet, but little known to the
                  'general public' is constituted by botnets. Botnets are
                  networks of infected computers, which are headed by a pirate
                  called also 'Attacker' or 'Master'. The botnets are nowadays
                  mainly responsible for large-scale coordinated attacks. The
                  attacker can ask the infected computers called 'Agents' or
                  'Zombies' to perform all sorts of tasks for him, like sending
                  spam, performing DDoS attacks, phishing campaigns, delivering
                  malware, or leasing or selling their botnets to other
                  fraudsters anywhere. In this paper we present a
                  classification that reflects the life cycle and current
                  resilience techniques of botnets, distinguishing the
                  propagation, the injection, the control and the attack
                  phases. Then we study the effectiveness of the adopted
                  taxonomy by applying it to existing botnets to study their
                  main characteristics. We conclude by the upcoming steps in
                  our research.}
}
@inproceedings{hachem2012hadega,
  author       = {Hachem, Nabil and Debar, Herv{\'e} and Garcia-Alfaro,
                  Joaquin},
  title        = {{HADEGA}: A novel {MPLS}-based mitigation solution to handle
                  network attacks},
  booktitle    = {Proceedings of the IEEE 31st International Performance
                  Computing and Communications Conference ({IPCCC}'2012)},
  pages        = {171--180},
  year         = 2012,
  month        = dec,
  location     = {Austin, Texas},
  organization = {IEEE},
  doi          = {10.1109/PCCC.2012.6407750},
  url          = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6407750},
  keywords     = {telecommunication security, telecommunication standards,
                  HADEGA, MPLS standard, MPLS-based mitigation solution,
                  adaptive mitigation solution, multiprotocol label switching,
                  network attack handling, network detection alerts, IP
                  networks, Multiprotocol label switching, Network topology,
                  Quality of service, Routing, Standards, Multiprotocol Label
                  Switching, Network Attack Mitigation, Network Security, QoS},
  abstract     = {We present HADEGA, a novel adaptive mitigation solution to
                  handle the impact of network attacks. By extracting
                  information from network detection alerts, and build upon the
                  Multiprotocol Label Switching (MPLS) standard, the solution
                  assigns labels and quality of service treatments to
                  suspicious flows. As a result, those labeled flows are
                  controlled and properly handled inside the core network of
                  service providers. We conducted simulations in order to
                  evaluate the efficiency of our approach. Results are
                  presented.}
}
@inproceedings{hachem2013adaptive,
  author    = {Hachem, Nabil and Garcia-Alfaro, Joaquin and Debar, Herv{\'e}},
  title     = {An Adaptive Mitigation Framework for Handling Suspicious Network
               Flows via {MPLS} Policies},
  booktitle = {Secure IT Systems - Proceedings of the 18th Nordic Conference
               ({NordSec} 2013)},
  pages     = {297--312},
  year      = 2013,
  month     = oct,
  location  = {Ilulissat, Greenland},
  publisher = {Springer Berlin Heidelberg},
  isbn      = {978-3-642-41487-9},
  volume    = 8208,
  series    = {Lecture Notes in Computer Science},
  editor    = {Riis Nielson, Hanne and Gollmann, Dieter},
  doi       = {10.1007/978-3-642-41488-6_20},
  url       = {http://link.springer.com/chapter/10.1007%2F978-3-642-41488-6_20},
  keywords  = {Network Security, Policy Management, MPLS, OrBAC},
  abstract  = {As network attacks become more complex, defence strategies must
               provide means to handle more flexible and dynamic requirements.
               The Multiprotocol Label Switching (MPLS) standard is a promising
               method to properly handle suspicious flows participating in such
               network attacks. Tasks such as alert data extraction, and MPLS
               routers configuration present an entailment to activate the
               defence process. This paper introduces a novel framework to
               define, generate and implement mitigation policies on MPLS
               routers. The activation of such policies is triggered by the
               alerts and expressed using a high level formalism. An
               implementation of the approach is presented.}
}
@inproceedings{jacob2010formalization,
  author       = {Jacob, Gr{\'e}goire and Filiol, Eric and Debar, Herv{\'e}},
  title        = {Formalization of viruses and malware through process
                  algebras},
  booktitle    = {Proceedings of the International Conference on Availability,
                  Reliability, and Security ({ARES}'10)},
  pages        = {597--602},
  year         = 2010,
  month        = feb,
  location     = {Krakow, Poland},
  organization = {IEEE},
  doi          = {10.1109/ARES.2010.59},
  keywords     = {decidability, invasive software, process algebra,
                  Join-Calculus, Turing-equivalent formalisms, abstract
                  virology, detection undecidability, malware formalization,
                  process algebras, process-based malware model, viral models,
                  virus formalization, Algebra, Availability, Calculus,
                  Computer viruses, Concurrent computing, Jacobian matrices,
                  Protection, Security, Steganography, detection, malware,
                  prevention},
  abstract     = {Abstract virology has seen the apparition of successive viral
                  models, all based on Turing-equivalent formalisms.
                  Considering recent malware, these are only partially covered
                  because functional formalisms do not support interactive
                  computations. This article provides a basis for a unified
                  malware model, founded on the Join-Calculus. In terms of
                  expressiveness, the process-based model supports the
                  fundamental notion of self-replication but also interactions,
                  concurrency and non-termination to cover evolved malware. In
                  terms of protection, detection undecidability and prevention
                  by isolation still hold. Additional results are established:
                  calculus fragments where detection is decidable, definition
                  of a non-infection property, potential solutions to restrict
                  propagation.}
}
@inproceedings{kheir2010ex,
  author    = {Kheir, Nizar and Cuppens-Boulahia, Nora and Cuppens,
               Fr{\'e}d{\'e}ric and Debar, Herv{\'e}},
  title     = {{Ex-SDF}: An Extended Service Dependency Framework for Intrusion
               Impact Assessment},
  booktitle = {Security and Privacy -- Silver Linings in the Cloud: Proceedings
               of the 25th {IFIP} {TC-11} International Information Security
               Conference ({SEC}'2010)},
  pages     = {148--160},
  year      = 2010,
  month     = sep,
  location  = {Brisbane, Australia},
  isbn      = {978-3-642-15256-6},
  volume    = 330,
  series    = {IFIP Advances in Information and Communication Technology},
  editor    = {Rannenberg, Kai and Varadharajan, Vijay and Weber, Christian},
  doi       = {10.1007/978-3-642-15257-3_14},
  url       = {http://link.springer.com/chapter/10.1007%2F978-3-642-15257-3_14},
  publisher = {Springer Berlin Heidelberg},
  keywords  = {intrusion response, service dependencies},
  abstract  = {Information systems are increasingly dependent on highly
               distributed architectures that include multiple dependencies.
               Even basic attacks like script-kiddies have drastic effects on
               target systems as they easily spread through existing
               dependencies. Unless intrusion effects are accurately assessed,
               response systems will still be blinded when selecting optimal
               responses. In fact, using only response costs as a basis to
               select responses is still meaningless if not compared to
               intrusion costs. While conventional responses provoke mostly
               availability impacts, intrusions affect confidentiality,
               integrity and availability. This paper develops an approach to
               assess intrusion impacts on IT systems. It uses service
               dependencies as frames for propagating impacts. It goes beyond
               existing methods which mostly use dependability analysis
               techniques. It explores service privileges as being the main
               targets for attackers, and the tunable parameters for intrusion
               response. The approach presented in this paper is implemented as
               a simulation-based framework and demonstrated for the example of
               a vehicle reservation service.}
}
@inproceedings{kheir2010service,
  author    = {Kheir, Nizar and Cuppens-Boulahia, Nora and Cuppens,
               Fr{\'e}d{\'e}ric and Debar, Herv{\'e}},
  title     = {A service dependency model for cost-sensitive intrusion
               response},
  booktitle = {Proceedings of the 15th European Symposium on Research in
               Computer Security},
  pages     = {626--642},
  year      = 2010,
  month     = sep,
  location  = {Athens, Greece},
  isbn      = {978-3-642-15496-6},
  volume    = 6345,
  series    = {Lecture Notes in Computer Science},
  editor    = {Gritzalis, Dimitris and Preneel, Bart and Theoharidou,
               Marianthi},
  doi       = {10.1007/978-3-642-15497-3_38},
  url       = {http://link.springer.com/chapter/10.1007%2F978-3-642-15497-3_38},
  publisher = {Springer Berlin Heidelberg},
  keywords  = {intrusion detection, intrusion response, cost evaluation,
               service dependencies},
  abstract  = {Recent advances in intrusion detection and prevention have
               brought promising solutions to enhance IT security. Despite
               these efforts, the battle with cyber attackers has reached a
               deadlock. While attackers always try to unveil new
               vulnerabilities, security experts are bounded to keep their
               softwares compliant with the latest updates. Intrusion response
               systems are thus relegated to a second rank because no one
               trusts them to modify system configuration during runtime.
               Current response cost evaluation techniques do not cover all
               impact aspects, favoring availability over confidentiality and
               integrity. They do not profit from the findings in intrusion
               prevention which led to powerful models including vulnerability
               graphs, exploit graphs, etc. This paper bridges the gap between
               these models and service dependency models that are used for
               response evaluation. It proposes a new service dependency
               representation that enables intrusion and response impact
               evaluation. The outcome is a service dependency model and a
               complete methodology to use this model in order to evaluate
               intrusion and response costs. The latter covers response
               collateral damages and positive response effects as they reduce
               intrusion costs.}
}
@inproceedings{lacoste2012self,
  author    = {Lacoste, Marc and Wailly, Aur{\'e}lien and Debar, Herv{\'e}},
  title     = {Self-Defending Clouds: Myth and Realities},
  booktitle = {Proceedings of C\&ESAR 2012},
  year      = 2012,
  month     = nov,
  keywords  = {Cloud Security Supervision, Cloud Security Management,
               Self-Defending Clouds, Cloud Threats, Autonomic Security, IaaS
               Infrastructures},
  abstract  = {Security is a growing concern as it remains the last barrier to
               widespread adoption of cloud environments. However, is today's
               cloud security Lucy in the Sky with Diamonds? Expected to be
               strong, flexible, efficient, and simple? But surprisingly, being
               neither? A new approach, making clouds self-defending, has been
               heralded as a possible element of answer to the cloud protection
               challenge. This paper presents an overview of today's state and
               advances in the field of cloud infrastructure self-defense. Four
               key self-protection principles are identified for IaaS
               self-protection to be effective. For each layer, mechanisms
               actually deployed to deliver security are analyzed to see how
               well they fulfill those principles. The main remaining research
               challenges are also discussed to yield truly mature
               self-defending clouds.}
}
@inproceedings{levillain2012one,
  author    = {Levillain, Olivier and {\'E}balard, Arnaud and Morin, Benjamin
               and Debar, Herv{\'e} and others},
  title     = {One Year of {Internet} {SSL} Measurement},
  booktitle = {Proceedings of the 28th Annual Computer Security Applications
               Conference ({ACSAC}'2012)},
  pages     = {11--20},
  year      = 2012,
  isbn      = {978-1-4503-1312-4},
  location  = {Orlando, Florida},
  numpages  = 10,
  doi       = {10.1145/2420950.2420953},
  url       = {http://doi.acm.org/10.1145/2420950.2420953},
  acmid     = 2420953,
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {HTTPS, SSL/TLS, X.509, certificates, internet measure},
  abstract  = {Over the years, SSL/TLS has become an essential part of internet
               security. As such, it should offer robust and state-of-the-art
               security, in particular for HTTPS, its first application.
               Theoretically, the protocol allows for a trade-off between
               secure algorithms and decent performance. Yet in practice,
               servers do not always support the latest version of the
               protocol, nor do they all enforce strong cryptographic
               algorithms. To assess the quality of HTTPS servers in the wild,
               we enumerated HTTPS servers on the internet in July 2010 and
               July 2011. We sent several stimuli to the servers to gather
               detailed information. We then analysed some parameters of the
               collected data and looked at how they evolved. We also focused
               on two subsets of TLS hosts within our measure: the trusted
               hosts (possessing a valid certificate at the time of the
               probing) and the EV hosts (presenting a trusted, so-called
               Extended Validation certificate). Our contributions rely on this
               methodology: the stimuli we sent, the criteria we studied and
               the subsets we focused on. Moreover, even if EV servers present
               a somewhat improved certificate quality over the TLS hosts, we
               show they do not offer overall high quality sessions, which
               could and should be improved.}
}
@inproceedings{levillain2014parsifal,
  title     = {{Parsifal}: Writing Efficient and Robust Binary
               Parsers, Quickly},
  author    = {Levillain, Olivier and Debar, Herv{\'e} and Morin,
               Benjamin},
  booktitle = {Proceedings of the 8th International Conference on
               Risks and Security of Internet and Systems
               (CRISIS'2013)},
  year      = 2013,
  month     = oct,
  pages     = {1--6},
  doi       = {10.1109/CRiSIS.2013.6766344},
  url       = {http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6749561},
  keywords  = {parsers},
  abstract  = {Parsers are pervasive software basic blocks: as soon
               as a program needs to communicate with another
               program or to read a file, a parser is
               involved. However, writing robust parsers can be
               difficult, as is revealed by the amount of bugs and
               vulnerabilities related to programming errors in
               parsers. In particular, network analysis tools can
               be very complex to implement: for example, the
               Wireshark project regularly publishes security
               patches on its various dissectors. As security
               researchers, we need robust tools on which we can
               depend. The starting point of Parsifal was a study
               of large amounts of SSL data. The data collected
               contained legitimate SSL messages, as well as
               invalid messages and other protocols (HTTP, SSH). To
               face this challenge and extract relevant
               information, we wrote several parsers, using
               different languages, which resulted in Parsifal, an
               OCaml-based parsing engine. Writing parsers and
               analysing data not only helped us better understand
               SSL/TLS, but also X.509 and BGP/MRT. More recently,
               we have started studying Kerberos messages. The
               contribution of Parsifal to security is
               twofold. First we provide sound tools to analyse
               complex file formats or network protocols. Secondly
               we implement robust detection/sanitization
               systems. The goal of this tutorial is to present
               Parsifal and to use it to write a network protocol
               parser (DNS) and a file format parser (PNG). The PNG
               parser will then be used to build a PNG
               sanitizer. Alternatively, an X.509 certificate
               signing request validator can be implemented.}
}
@article{me2010new,
  title     = {New Directions in Intrusion Detection and Alert
               Correlation},
  author    = {M{\'e}, Ludovic and Debar, Herv{\'e}},
  journal   = {Information Interaction Intelligence (I3)},
  volume    = 10,
  number    = 1,
  pages     = {11--31},
  year      = 2010,
  publisher = {C{\'e}padu{\`e}s},
  keywords  = {intrusion detection, correlation, diagnosis, test},
  abstract  = {In this paper, we examine the current state of
               research and commercial technology in intrusion
               detection. We derive issues that need to be studied
               by the research community to make progress in the
               field and propose 13 possible research directions
               and solutions to the problems or shortcomings
               identified.}
}
@inproceedings{mustapha2012limitation,
  title     = {Limitation of Honeypot/Honeynet Databases to Enhance
               Alert Correlation},
  author    = {Mustapha, Yosra and Debar, Herv{\'e} and Jacob,
               Gr{\'e}goire},
  pages     = {203--217},
  year      = 2012,
  month     = oct,
  booktitle = {Proceedings of the 6th International Conference on
               Mathematical Methods, Models and Architectures for
               Computer Network Security (MMM-ACNS'2012)},
  location  = {St. Petersburg, Russia},
  isbn      = {978-3-642-33703-1},
  volume    = 7531,
  series    = {Lecture Notes in Computer Science},
  editor    = {Kotenko, Igor and Skormin, Victor},
  doi       = {10.1007/978-3-642-33704-8_18},
  url       = {http://dx.doi.org/10.1007/978-3-642-33704-8_18},
  publisher = {Springer Berlin Heidelberg},
  keywords  = {intrusion detection, alert correlation, honeypots},
  abstract  = {In SIEM environments, security analysts process
               massive amount of alerts often imprecise. Alert
               correlation has been designed to efficiently analyze
               this large volume of alerts. However, a major
               limitation of existing correlation techniques is
               that they focus on the local knowledge of alerts and
               ignore the global view of the threat landscape. In
               this paper, we introduce an alert enrichment
               strategy that aims at improving the local domain
               knowledge about the event with relevant global
               information about the threat in order to enhance the
               security event correlation process. Today, the most
               prominent sources of information about the global
               threat landscape are the large honeypot/honeynet
               infrastructures which allow us to gather more
               in-depth insights on the modus operandi of attackers
               by looking at the threat dynamics. In this paper, we
               explore four honeypot databases that collect
               information about malware propagation and security
               information about web-based server profile. We
               evaluate the use of these databases to correlate
               local alerts with global knowledge. Our experiments
               show that the information stored in current honeypot
               databases suffers from several limitations related
               to: the interaction level of honeypots that
               influences their coverage and their analysis of the
               attacker's activities, collection of raw data which
               may include imprecise or voluminous information, the
               lack of standardization in the information
               representation which hinder cross-references between
               different databases, the lack of documentation
               describing the available information.}
}
@inproceedings{mustapha2013service,
  title     = {Service Dependencies-Aware Policy Enforcement
               Framework Based on Hierarchical Colored {Petri} Net},
  author    = {Mustapha, Yosra Ben and Debar, Herv{\'e}},
  booktitle = {Proceedings of the International Symposium on
               Security in Computing and Communications
               (SSCC'2013)},
  location  = {Mysore, India},
  month     = aug,
  pages     = {313--321},
  year      = 2013,
  publisher = {Springer Berlin Heidelberg},
  isbn      = {978-3-642-40575-4},
  volume    = 377,
  series    = {Communications in Computer and Information Science},
  editor    = {Thampi, Sabu M. and Atrey, Pradeep K. and Fan,
               Chun-I and Mart{\'i}nez P{\'e}rez, Gregorio},
  doi       = {10.1007/978-3-642-40576-1_31},
  url       = {http://dx.doi.org/10.1007/978-3-642-40576-1_31},
  keywords  = {intrusion detection, alert correlation, intrusion
               response},
  abstract  = {As computer and network security threats become more
               sophisticated and the number of service dependencies
               is increasing, optimal response decision is becoming
               a challenging task for security administrators. They
               should deploy and implement proper network security
               policy enforcement mechanisms in order to apply the
               appropriate countermeasures and defense strategy.
               In this paper, we propose a novel modeling framework
               which considers the service dependencies while
               identifying and selecting the appropriate Policy
               Enforcement Points during an intrusion response
               process. First, we present the security implications
               of the service dependencies that have been developed
               in the literature. Second, we give an overview of
               Colored Petri Nets (CPN) and Hierarchical CPN (HCPN)
               and its application on network security. Third, we
               specify our Service Dependencies-aware Policy
               Enforcement Framework which is based on the
               application of HCPN. Finally and to illustrate the
               advantage of our approach, we present a webmail
               application use case with the integration of
               different Policy Enforcement Points.}
}
@inproceedings{rieke2012challenges,
  title     = {Challenges for Advanced Security Monitoring--The
               {MASSIF} Project},
  author    = {Rieke, Roland and Prieto, Elsa and Diaz, Rodrigo and
               Debar, Herv{\'e} and Hutchison, Andrew},
  booktitle = {Proceedings of the 9th International Conference on
               Trust, Privacy and Security in Digital Business
               (TrustBus 2012)},
  location  = {Vienna, Austria},
  month     = sep,
  pages     = {222--223},
  year      = 2012,
  publisher = {Springer Berlin Heidelberg},
  isbn      = {978-3-642-32286-0},
  volume    = 7449,
  series    = {Lecture Notes in Computer Science},
  editor    = {Fischer-H{\"u}bner, Simone and Katsikas, Sokratis
               and Quirchmayr, Gerald},
  doi       = {10.1007/978-3-642-32287-7_23},
  url       = {http://dx.doi.org/10.1007/978-3-642-32287-7_23},
  keywords  = {security information and event management, SIEM,
               intrusion detection},
  abstract  = {The vision of creating a next-generation Security
               Information and Event Management environment drives
               the development of an architecture which provides
               for trustworthy and resilient collection of security
               events from source systems, processes and
               applications. A number of novel inspection and
               analysis techniques are applied to the events
               collected to provide high-level situational security
               awareness, not only on the network level but also at
               the service level where high-level threats such as
               money laundering appear. An anticipatory impact
               analysis will predict the outcome of threats and
               mitigation strategies and thus enable proactive and
               dynamic response.}
}
@inproceedings{schoo2011challenges,
  title     = {Challenges for Cloud Networking Security},
  author    = {Schoo, Peter and Fusenig, Volker and Souza, Victor
               and Melo, M{\'a}rcio and Murray, Paul and Debar,
               Herv{\'e} and Medhioub, Houssem and Zeghlache,
               Djamal},
  booktitle = {Mobile Networks and Management - Revised Selected
               Papers of the Second International ICST Conference
               (MONAMI'2010)},
  location  = {Santander, Spain},
  pages     = {298--313},
  year      = 2011,
  isbn      = {978-3-642-21443-1},
  volume    = 68,
  series    = {Lecture Notes of the Institute for Computer
               Sciences, Social Informatics and Telecommunications
               Engineering},
  editor    = {Pentikousis, Kostas and Ag{\"u}ero, Ram{\'o}n and
               Garc{\'i}a-Arranz, Marta and Papavassiliou, Symeon},
  doi       = {10.1007/978-3-642-21444-8_26},
  url       = {http://dx.doi.org/10.1007/978-3-642-21444-8_26},
  publisher = {Springer Berlin Heidelberg},
  keywords  = {Cloud Networking, Cloud Computing, Network
               Virtualisation, Security},
  abstract  = {Cloud computing is widely considered as an
               attractive service model since the users commitments
               for investment and operations are minimised, and
               costs are in direct relation to usage and
               demand. However, when networking aspects for
               distributed clouds are considered, there is little
               support and the effort is often underestimated. The
               project SAIL is addressing cloud networking as the
               combination of management for cloud computing and
               vital networking capabilities between distributed
               cloud resources involved to improve the management
               of both. This position paper presents new security
               challenges as considered in SAIL for ensuring
               legitimate usage of cloud networking resources and
               for preventing misuse.}
}
@inproceedings{wailly2011towards,
  title        = {Towards Multi-Layer Autonomic Isolation of Cloud
                  Computing and Networking Resources},
  author       = {Wailly, Aur{\'e}lien and Lacoste, Marc and Debar,
                  Herv{\'e}},
  booktitle    = {Proceedings of the 2011 Conference on Network and
                  Information Systems Security (SAR-SSI'2011)},
  pages        = {1--9},
  year         = 2011,
  organization = {IEEE},
  url          = {http://aurelien.wail.ly/publications/sarssi-2011.pdf},
  keywords     = {cloud computing, computer network security, IaaS
                  infrastructure, administration, cloud resource
                  isolation, flexible approach, multilayer
                  autonomic isolation, networking resources,
                  reconciling computing, security components,
                  Complexity theory, Computer architecture, Data
                  privacy, Security, Virtual machine monitors,
                  Virtual private networks},
  abstract     = {This paper describes a flexible approach to manage
                  autonomically cloud resource isolation between
                  different layers of an IaaS infrastructure,
                  reconciling computing and network views. The
                  corresponding framework overcomes fragmentation of
                  security components and automates their
                  administration by orchestrating different autonomic
                  loops, vertically (between layers) and horizontally
                  (between views).}
}
@inproceedings{wailly2012kungfuvisor,
  title     = {{KungFuVisor}: enabling hypervisor self-defense},
  author    = {Wailly, Aur{\'e}lien and Lacoste, Marc and Debar,
               Herv{\'e}},
  booktitle = {EuroDW'12: The 6th EuroSys Doctoral Workshop},
  year      = 2012,
  location  = {Bern, Switzerland},
  url       = {http://aurelien.wail.ly/publications/eurodw-2012.pdf},
  abstract  = {Recently, some of the most potent attacks against
               cloud computing infrastructures target their very
               foundation: the hypervisor or Virtual Machine
               Monitor (VMM). In each case, the main attack vector
               is a poorly confined device driver in the
               virtualization layer, enabling to bypass resource
               isolation and take complete infrastructure
               control. Current architectures offer no protection
               against such attacks. At best, they attempt to
               contain but do not eradicate the detected threat,
               usually with static, hard-to-manage defense
               strategies. This paper proposes an altogether
               different approach by presenting KungFuVisor, a
               framework to build self-defending hypervisors. The
               framework regulates hypervisor protection through
               several coordinated autonomic security loops which
               supervise different VMM layers through well-defined
               hooks. Thus, interactions between a device driver
               and its VMM environment may be strictly monitored
               and controlled automatically. The result is a very
               flexible self-protection architecture, enabling to
               enforce dynamically a rich spectrum of remediation
               actions over different parts of the VMM, also
               facilitating defense strategy administration.}
}
@inproceedings{wailly2012vespa,
  title     = {{VESPA}: multi-layered self-protection for cloud
               resources},
  author    = {Wailly, Aur{\'e}lien and Lacoste, Marc and Debar,
               Herv{\'e}},
  booktitle = {Proceedings of the 9th International Conference on
               Autonomic Computing (ICAC '12)},
  pages     = {155--160},
  year      = 2012,
  isbn      = {978-1-4503-1520-3},
  location  = {San Jose, California, USA},
  numpages  = 6,
  url       = {http://doi.acm.org/10.1145/2371536.2371564},
  doi       = {10.1145/2371536.2371564},
  acmid     = 2371564,
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {autonomic computing, cloud security, iaas,
               self-protection},
  abstract  = {Self-protection has recently raised growing interest
               as possible element of answer to the cloud computing
               infrastructure protection challenge. Faced with
               multiple threats and heterogeneous defense
               mechanisms, the autonomic approach proposes simpler,
               stronger, and more efficient cloud security
               management. Yet, previous solutions fall at the last
               hurdle as they overlook key features of the cloud,
               by lack of flexible security policies, cross-layered
               defense, multiple control granularities, and open
               security architectures. This paper presents VESPA, a
               self-protection architecture for cloud
               infrastructures overcoming such limitations. VESPA
               is policy-based, and regulates security at two
               levels, both within and across infrastructure
               layers. Flexible coordination between
               self-protection loops allows enforcing a rich
               spectrum of security strategies such as cross-layer
               detection and reaction. A multi-plane extensible
               architecture also enables simple integration of
               commodity detection and reaction
               components. Evaluation of a VESPA implementation
               shows that the design is applicable for effective
               and flexible self-protection of cloud
               infrastructures.}
}
@inproceedings{waillyretrovisor,
  title     = {{RetroVisor}: Nested Virtualization for Multi {IaaS}
               {VM}},
  author    = {Wailly, Aur{\'e}lien and Lacoste, Marc and Debar,
               Herv{\'e}},
  booktitle = {Conf{\'e}rence d'informatique en Parall{\'e}lisme,
               Architecture et Syst{\`e}me},
  year      = 2013,
  url       = {http://aurelien.wail.ly/publications/compas-2013.pdf},
  abstract  = {Nested virtualization provides an extra layer of
               virtualization to enhance security with fairly
               reasonable performance impact. Usercentric vision of
               cloud computing gives a high-level of control on the
               whole infrastructure, such as untrusted dom0. This
               paper introduces RetroVisor, a security architecture
               to seamlessly run a virtual machine (VM) on multiple
               hypervisors simultaneously. We argue that this
               approach delivers high-availability and provides
               strong guarantees on multi IaaS infrastructures. The
               user can perform detection and remediation against
               potential hypervisors weaknesses, unexpected
               behaviors and exploits.}
}
@inproceedings{wazan2013attribute,
  title        = {Attribute-based Mining Process for the
                  Organization-Based Access Control Model},
  author       = {Wazan, Ahmad Samer and Blanc, Gregory and Debar,
                  Herv{\'e} and Garcia-Alfaro, Joaquin},
  booktitle    = {Proceedings of the 12th IEEE International
                  Conference on Trust, Security and Privacy in
                  Computing and Communications (TrustCom'2013)},
  pages        = {421--430},
  year         = 2013,
  organization = {IEEE},
  doi          = {10.1109/TrustCom.2013.53},
  month        = jul,
  keywords     = {authorisation, data mining, OrBAC model, abstract
                  concepts, abstraction objective, attribute-based
                  mining process, organization-based access control
                  model, security access control models, security
                  policies, Abstracts, Access control, Concrete,
                  Context, Organizations, Permission, Access Control,
                  Policy Management, Role Mining, Security},
  abstract     = {Since the late 60's, different security access
                  control models have been proposed. Their rationale
                  is to conceive high level abstract concepts that
                  permit to manage the security policies of
                  organizations efficiently. However, enforcing these
                  models is not a straightforward task, especially
                  when they do not consider the reality of
                  organizations which may have ad-hoc security
                  policies already deployed. Another issue is the
                  vagueness of their abstract concepts. We propose to
                  bridge the gap between the theory of access control
                  models and the reality of organizations by defining
                  an attribute-based mining process that deduce the
                  abstract concepts starting from the attribute
                  level. Additionally, the attributes allow us to
                  semantically enrich the obtained results. We have
                  selected the Organization-Based Access Control
                  (OrBAC) model as the abstraction objective of our
                  study.}
}