diff --git a/build.sh b/build.sh index 5bba9d710439461d597ba2df303831eb536e9066..bfe3bef1d7249e77ac27986678ee05097ad438f1 100755 --- a/build.sh +++ b/build.sh @@ -106,11 +106,11 @@ build_from_source xenon main.tex xenon-computing-model.pdf build_from_source sc18 SC18.tex *.png -#build_from_source mw-esaco mw-esaco.tex *.png -#build_from_source mw-kube mw-kube.tex -#build_from_source mw-cdmi-storm mw-cdmi-storm.tex *.png *.jpeg -#build_from_source mw-software mw-software.tex -#build_from_source mw-iam mw-iam.tex +## Research and Developments +build_from_source sd_iam main.tex biblio.bib *.png +build_from_source sd_storm main.tex biblio.bib *.png +build_from_source sd_storm2 main.tex biblio.bib *.png +build_from_source sd_nginx_voms main.tex biblio.bib *.png #build_from_source na62 na62.tex link_pdf padme 2019_PADMEcontribution.pdf @@ -140,16 +140,17 @@ build_from_source ds_eoscpilot ds_eoscpilot.tex *.png build_from_source ds_eoschub ds_eoschub.tex *.png build_from_source ds_cloud_c ds_cloud_c.tex *.png build_from_source ds_infn_cc ds_infn_cc.tex *.png -build_from_source ds_devops_pe ds_devops_pe.tex +build_from_source ds_devops_pe ds_devops_pe.tex *.png #build_from_source cloud_b cloud_b.tex *.png *.jpg #build_from_source cloud_c cloud_c.tex *.png *.pdf #build_from_source cloud_d cloud_d.tex *.png build_from_source sdds-xdc SDDS-XDC.tex *.png build_from_source sdds-deep SDDS-DEEP.tex *.png build_from_source PhD_DataScience_2018 PhD-DataScience-2018.tex +build_from_source chnet dhlab.tex *.png build_from_source pett pett.tex bibliopett.bib -#build_from_source iso iso.tex 27001.png biblioiso.bib +build_from_source summerstudent summerstudent.tex *.png pdflatex ${topdir}/cnaf-annual-report-2018.tex \ && pdflatex ${topdir}/cnaf-annual-report-2018.tex 2> /dev/null \ diff --git a/cnaf-annual-report-2018.tex b/cnaf-annual-report-2018.tex index bee284c2f41ee40d95079dfb0ca22ee4c683c93d..7cc8b1177cc38b9a3dbf29b6f0c9459a209896da 100644 --- a/cnaf-annual-report-2018.tex +++ 
b/cnaf-annual-report-2018.tex @@ -144,7 +144,7 @@ Introducing the sixth annual report of CNAF... %\includepdf[pages=1, pagecommand={\thispagestyle{empty}}]{papers/experiment.pdf} \cleardoublepage \ia{User and Operational Support at CNAF}{user-support} -\ia{ALICE computing at the INFN CNAF Tier1}{alice} +\ia{ALICE computing at the INFN CNAF Tier 1}{alice} \ia{AMS-02 data processing and analysis at CNAF}{ams} \ia{The ATLAS experiment at the INFN CNAF Tier 1}{atlas} \ia{The Borexino experiment at the INFN-CNAF}{borexino} @@ -183,12 +183,12 @@ Introducing the sixth annual report of CNAF... \addcontentsline{toc}{part}{The Tier 1 and Data center} \addtocontents{toc}{\protect\mbox{}\protect\hrulefill\par} %\includepdf[pages=1, pagecommand={\thispagestyle{empty}}]{papers/datacenter.pdf} -\ia{The INFN Tier-1}{tier1} -\ia{The INFN-Tier1: the computing farm}{farming} +\ia{The INFN Tier 1}{tier1} +\ia{The INFN-Tier 1: the computing farm}{farming} \ia{Data management and storage systems}{storage} %\ia{Evaluation of the ClusterStor G200 Storage System}{seagate} %\ia{Activity of the INFN CNAF Long Term Data Preservation (LTDP) group}{ltpd} -\ia{The INFN-Tier1: Network and Security}{net} +\ia{The INFN-Tier 1: Network and Security}{net} %\ia{Cooling system upgrade and Power Usage Effectiveness improvement in the INFN CNAF Tier 1 infrastructure}{infra} %\ia{National ICT Services Infrastructure and Services}{ssnn1} %\ia{National ICT Services hardware and software infrastructures for Central Services}{ssnn2} @@ -204,36 +204,36 @@ CREAM-CE/LSF to HTCondor-CE/HTCondor}{HTC_testbed} \addtocontents{toc}{\protect\mbox{}\protect\hrulefill\par} %\includepdf[pages=1, pagecommand={\thispagestyle{empty}}]{papers/research.pdf} \cleardoublepage -\ia{Internal Auditing INFN for GDPR compliance}{audit} -%\ia{Continuous Integration and Delivery with Kubernetes}{mw-kube} -%\ia{Middleware support, maintenance and development}{mw-software} -%\ia{Evolving the INDIGO IAM service}{mw-iam} -%\ia{Esaco: an 
OAuth/OIDC token introspection service}{mw-esaco} -%\ia{StoRM Quality of Service and Data Lifecycle support through CDMI}{mw-cdmi-storm} -%\ia{A low-cost platform for space software development}{lowcostdev} +\ia{Evolving the INDIGO IAM service}{sd_iam} +\ia{StoRM maintenance and evolution}{sd_storm} +\ia{StoRM 2: initial design and development activities}{sd_storm2} +\ia{A VOMS module for the Nginx web server}{sd_nginx_voms} \ia{Comparing Data Mining Techniques for Software Defect Prediction}{dmsq} %\ia{Summary of a tutorial on statistical methods}{st} %\ia{Dynfarm: Transition to Production}{dynfarm} %\ia{Official testing and increased compatibility for Dataclient}{dataclient} -%\ia{Common software lifecycle management in external projects: Placeholder}{ds_devops_pe} +\ia{Common software lifecycle management in external projects}{ds_devops_pe} \ia{EOSC-hub: contributions to project achievements}{ds_eoschub} \ia{EOSCpilot - Interoperability aspects and results}{ds_eoscpilot} \ia{Cloud@CNAF Management and Evolution}{ds_cloud_c} \ia{INFN CorporateCloud: Management and evolution}{ds_infn_cc} \ia{eXtreme DataCloud project: Advanced data management services for distributed e-infrastructures}{sdds-xdc} \ia{DEEP-HybridDataCloud project: Hybrid services for distributed e-infrastructures}{sdds-deep} +\ia{DHLab: a digital library for the INFN Cultural Heritage Network}{chnet} \cleardoublepage \thispagestyle{empty} \phantomsection -\addcontentsline{toc}{part}{Technology transfer and other projects} +\addcontentsline{toc}{part}{Technology transfer, outreach and more} \addtocontents{toc}{\protect\mbox{}\protect\hrulefill\par} %\includepdf[pages=1, pagecommand={\thispagestyle{empty}}]{papers/transfer.pdf} \cleardoublepage \ia{External Projects and Technology Transfer}{pett} -%\ia{The ISO 27001 Certification}{iso} -%\ia{COmputing on SoC Architectures: the COSA project at CNAF}{cosa} -%\ia{The ExaNeSt project - activities at CNAF}{exanest} +\ia{INFN CNAF log analysis: a first 
experience with summer students}{summerstudent} +\ia{The annual international conference of high performance computing: SC18 from INFN point of view}{sc18} +\ia{Infrastructures and Big Data processing as pillars in the XXXIII PhD course in Data Science and Computation}{PhD_DataScience_2018} +\ia{Internal Auditing INFN for GDPR compliance}{audit} + \cleardoublepage \thispagestyle{empty} diff --git a/contributions/HTC_testbed/HTC_testbed_AR2018.tex b/contributions/HTC_testbed/HTC_testbed_AR2018.tex index f2326c652e7f8b8559072efd39ca855c5c80e4b3..615ebdf04c9efc65df66180324b515a460dad783 100644 --- a/contributions/HTC_testbed/HTC_testbed_AR2018.tex +++ b/contributions/HTC_testbed/HTC_testbed_AR2018.tex @@ -14,12 +14,12 @@ \title{Evaluating Migration of INFN--T1 from CREAM-CE/LSF to HTCondor-CE/HTCondor} \author{Stefano Dal Pra$^1$} -\address{$^1$ INFN-CNAF, viale Berti-Pichat 6/2, 40127 Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{stefano.dalpra@cnaf.infn.it} \begin{abstract} - The Tier--1 datacentre provides computing resources for a variety of HEP and + The Tier 1 data center provides computing resources for a variety of HEP and Astrophysics experiments, organized in Virtual Organization submitting their jobs to our computing facilities through Computing Elements, acting as Grid interfaces to the Local Resource Manager. We planned to phase--out our
The HTCondor--CE, on the other hand, is a more recent product, and a number of issues might be too problematic for us to deal with. Our focus is about ensuring that this CE implementation can be a viable solution for us. diff --git a/contributions/PhD_DataScience_2018/PhD-DataScience-2018.tex b/contributions/PhD_DataScience_2018/PhD-DataScience-2018.tex index 573887161a6e91c15db5a79d275f7c653eb04da2..6e7a99499f82271c77f4d8e6cea67f89a6d93ec5 100644 --- a/contributions/PhD_DataScience_2018/PhD-DataScience-2018.tex +++ b/contributions/PhD_DataScience_2018/PhD-DataScience-2018.tex @@ -4,11 +4,11 @@ \title{ Infrastructures and Big Data processing as pillars in the XXXIII PhD couse in Data Sciece and Computation} %\address{Production Editor, \jpcs, \iopp, Dirac House, Temple Back, Bristol BS1~6BE, UK} -\author{D. Salomoni$^1$, A. Costantini$^1$, C. D. Duma$^1$, B. Martelli$^1$, D. Cesini$^1$, E. Fattibene$^1$ and D. Michelotto $^1$ +\author{D. Salomoni$^1$, A. Costantini$^1$, C. D. Duma$^1$, B. Martelli$^1$, D. Cesini$^1$, E. Fattibene$^1$, D. Michelotto $^1$ % etc. 
} -\address{$^1$ INFN-CNAF, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{davide.salomoni@cnaf.infn.it} diff --git a/contributions/alice/main.tex b/contributions/alice/main.tex index 9a8b1a845df329d1c72a029bccaafa235ccbda20..3cc7f34a857c14f20d55d4da15c515939d8a0a01 100644 --- a/contributions/alice/main.tex +++ b/contributions/alice/main.tex @@ -25,20 +25,20 @@ \newcommand{\ctau} {$c \tau$} \newcommand{\ct} {$ct$} \newcommand{\LKz} {$\Lambda$/$K^{0}$} -\newcommand{\s} {$\sqrt{s}$} -\newcommand{\snn} {$\sqrt{s_{\mathrm{NN}}}$} +\newcommand{\s} {\sqrt{s}} +\newcommand{\snn} {\sqrt{s_{\mathrm{NN}}}} \newcommand{\dndy} {d$N$/d$y$} \newcommand{\OO} {$\mathrm{O^{2}}$} \begin{document} -\title{ALICE computing at the INFN CNAF Tier1} +\title{ALICE computing at the INFN CNAF Tier 1} -\author{Stefano Piano$^1$, Domenico Elia$^2$, Stefano Bagnasco$^3$, Francesco Noferini$^4$, Nicol\`o Jacazio$^5$, Gioacchino Vino$^2$} -\address{$^1$ INFN Sezione di Trieste, Trieste, Italy} -\address{$^2$ INFN Sezione di Bari, Bari, Italy} -\address{$^3$ INFN Sezione di Torino, Torino, Italy} -\address{$^4$ INFN Sezione di Bologna, Bologna, Italy} -\address{$^5$ INFN CNAF, Bologna, Italy} +\author{S. Piano$^1$, D. Elia$^2$, S. Bagnasco$^3$, F. Noferini$^4$, N. Jacazio$^5$, G. Vino$^2$} +\address{$^1$ INFN Sezione di Trieste, Trieste, IT} +\address{$^2$ INFN Sezione di Bari, Bari, IT} +\address{$^3$ INFN Sezione di Torino, Torino, IT} +\address{$^4$ INFN Sezione di Bologna, Bologna, IT} +\address{$^5$ INFN-CNAF, Bologna, IT} \ead{stefano.piano@ts.infn.it} @@ -56,13 +56,13 @@ The main goal of ALICE is the study of the hot and dense matter created in ultra The main goal of the run in 2018 was to complete the approved Run2 physics program and it was fully achieved thanks to the excellent performance of the apparatus. -ALICE resumed data taking with beams in April at the restart of LHC operation with pp collisions (\s~=~13~TeV). 
ALICE continued to collect statistics with pp collisions from April 2nd to October 25th with the same trigger mix as in 2017. As planned, ALICE was operating with pp luminosity leveled to $\mathrm{2.6\times10^{30}}$ $\mathrm{cm^{-2}s^{-1}}$ providing an interaction rate of 150 kHz. The HLT compression factor was improved to 8.5 throughout the data taking, thus the HLT was able to reject the higher amount of spurious clusters, which were anticipated with Ar-CO2 gas mixture in the TPC. The average RAW data event size after compression was 1.7MB at the nominal interaction rate (150 kHz), exactly as expected and used for the resource calculations. At the end of the pp period, ALICE arrived at 43\% combined efficiency (LHC availability 47\% * ALICE efficiency 92\%). +ALICE resumed data taking with beams in April at the restart of LHC operation with pp collisions ($\s=13$~TeV). ALICE continued to collect statistics with pp collisions from April 2nd to October 25th with the same trigger mix as in 2017. As planned, ALICE was operating with pp luminosity leveled to $\mathrm{2.6\times10^{30}}$ $\mathrm{cm^{-2}s^{-1}}$ providing an interaction rate of 150 kHz. The HLT compression factor was improved to 8.5 throughout the data taking, thus the HLT was able to reject the higher amount of spurious clusters, which were anticipated with Ar-CO2 gas mixture in the TPC. The average RAW data event size after compression was 1.7MB at the nominal interaction rate (150 kHz), exactly as expected and used for the resource calculations. At the end of the pp period, ALICE arrived at 43\% combined efficiency (LHC availability 47\% * ALICE efficiency 92\%). -The \PbPb (\snn~=~5.02~TeV) data taking period started in November 2018 and was scheduled for 24 days. The target was to reach a total integrated luminosity of 1 $\mathrm{nb^{-1}}$ for Run2 and to complete the ALICE goals for the collection of a large sample of central and minimum bias collisions. 
To achieve this, the interaction rate was leveled at 8 kHz (L = $\mathrm{1.0\times10^{27}}$ $\mathrm{cm^{-2}s^{-1}}$) and data taken at close to the maximum achievable readout rate. The accelerator conditions were different compared to the foreseen mainly because of the delay in the beam start by 3-4 days due to solenoid coil fault in LINAC3 and the 20\% loss of integrated luminosity due to beam sizes 50\% larger at IP2 than at IP1/IP5 during the whole Pb-Pb period. The LHC time in Stable Beams was 47\%, the average data taking efficiency by ALICE was 87\% and a maximum HLT compression factor close to 9 has been reached during the Pb-Pb period. To compensate for the reduced beam availability, the rates of different triggers were adjusted to increase as much as possible the statistics in central and semi-central events. Overall, we collected 251M central and mid-central events and 159M minimum bias events. To further minimize the impact of Pb-bPb run on tape resources, ALICE additionally compressed the non-TPC portion of RAW data (by applying level 2 gzip compression) resulting in additional 17\% reduction of data volume on tape. As a result, the accumulated amount of Pb--Pb RAW data was 5.5~PiB. A total amount of RAW data of 11~PiB, including pp, was written to tape at Tier0, and then replicated at the Tier1s. The data accumulation curve at Tier0 is shown in Fig.\ref{fig:rawdata} and about 4.2~PiB of RAW data has been replicated to CNAF during 2018 with a maximum rate of 360 TiB per week, limited only by the tape drives speed considering the 100 Gb/s LHCOPN bandwidth between CERN and CNAF, as shown by the Fig.\ref{fig:tottraftape}. +The \PbPb ($\snn=5.02$~TeV) data taking period started in November 2018 and was scheduled for 24 days. The target was to reach a total integrated luminosity of 1 $\mathrm{nb^{-1}}$ for Run2 and to complete the ALICE goals for the collection of a large sample of central and minimum bias collisions. 
To achieve this, the interaction rate was leveled at 8 kHz (L = $\mathrm{1.0\times10^{27}}$ $\mathrm{cm^{-2}s^{-1}}$) and data taken at close to the maximum achievable readout rate. The accelerator conditions were different compared to the foreseen mainly because of the delay in the beam start by 3-4 days due to solenoid coil fault in LINAC3 and the 20\% loss of integrated luminosity due to beam sizes 50\% larger at IP2 than at IP1/IP5 during the whole Pb-Pb period. The LHC time in Stable Beams was 47\%, the average data taking efficiency by ALICE was 87\% and a maximum HLT compression factor close to 9 has been reached during the Pb-Pb period. To compensate for the reduced beam availability, the rates of different triggers were adjusted to increase as much as possible the statistics in central and semi-central events. Overall, we collected 251M central and mid-central events and 159M minimum bias events. To further minimize the impact of Pb-bPb run on tape resources, ALICE additionally compressed the non-TPC portion of RAW data (by applying level 2 gzip compression) resulting in additional 17\% reduction of data volume on tape. As a result, the accumulated amount of Pb--Pb RAW data was 5.5~PiB. A total amount of RAW data of 11~PiB, including pp, was written to tape at Tier0, and then replicated at the Tier1s. The data accumulation curve at Tier0 is shown in Fig.\ref{fig:rawdata} and about 4.2~PiB of RAW data has been replicated to CNAF during 2018 with a maximum rate of 360 TiB per week, limited only by the tape drives speed considering the 100 Gb/s LHCOPN bandwidth between CERN and CNAF, as shown by the Fig.\ref{fig:tottraftape}. 
\begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{raw_data_accumulation_run2.png} +\includegraphics[width=0.75\textwidth]{raw_data_accumulation_run2} \end{center} \caption{Raw data accumulation curve for Run2.} \label{fig:rawdata} @@ -72,7 +72,7 @@ The p-p data collected in 2018 has been fully calibrated and processed in Pass1, \begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{total_traffic_cnaf_tape_2018.png} +\includegraphics[width=0.75\textwidth]{total_traffic_cnaf_tape_2018} \end{center} \caption{ALICE traffic per week and total traffic on the CNAF tape during 2018.} \label{fig:tottraftape} @@ -81,15 +81,16 @@ The p-p data collected in 2018 has been fully calibrated and processed in Pass1, Along 2018 ALICE many new physics results have been obtained from pp, p--Pb, \PbPb and \XeXe collisions from Run2 data taking, while also the collaboration has continued to work on results from the analysis of the Run1 data. Almost 50 papers have been submitted to journals in the last year, including in particular the main topics reported in the following. In \pp and in \pPb collisions, for instance, ALICE studied the - $\Lambda_{\rm c}^+$ production~\cite{Acharya:2017kfy}, the prompt and non-prompt $\hbox {J}/\psi $ production and nuclear modification at mid-rapidity~\cite{Acharya:2018yud} and the measurement of the inclusive J/$\psi$ polarization at forward rapidity in \pp collisions at $\mathbf {\sqrt{s} = 8}$~TeV \cite{Acharya:2018uww}. + $\Lambda_{\rm c}^+$ production~\cite{Acharya:2017kfy}, the prompt and non-prompt $\hbox {J}/\psi$ production and nuclear modification at mid-rapidity~\cite{Acharya:2018yud} and the measurement of the inclusive $\hbox {J}/\psi$ polarization at forward rapidity in \pp collisions +at $\s= 8$~TeV \cite{Acharya:2018uww}. 
Looking at \PbPb data ALICE succeeded in studying -the $D$-meson azimuthal anisotropy in midcentral Pb-Pb collisions at $\mathbf{\sqrt{s_{\rm NN}}=5.02}$ TeV~\cite{Acharya:2017qps}, the Z$^0$-boson production at large rapidities in Pb-Pb collisions at $\sqrt{s_{\rm NN}}=5.02$ TeV~\cite{Acharya:2017wpf} and the anisotropic flow of identified particles in Pb-Pb collisions at $ {\sqrt{s}}_{\mathrm{NN}}=5.02 $ TeV~\cite{Acharya:2018zuq}. The anisotropic flow was also studied in \XeXe collisions at $\mathbf{\sqrt{s_{\rm{NN}}} = 5.44}$ TeV~\cite{Acharya:2018ihu}, together with the inclusive J/$\psi$ production~\cite{Acharya:2018jvc} and the transverse momentum spectra and nuclear modification factors of charged particles~\cite{Acharya:2018eaq}.\\ +the $D$-meson azimuthal anisotropy in midcentral Pb-Pb collisions at $\snn=5.02$~TeV~\cite{Acharya:2017qps}, the Z$^0$-boson production at large rapidities in Pb-Pb collisions at $\snn=5.02$~TeV~\cite{Acharya:2017wpf} and the anisotropic flow of identified particles in Pb-Pb collisions at $ \snn=5.02 $~TeV~\cite{Acharya:2018zuq}. The anisotropic flow was also studied in \XeXe collisions at $\snn = 5.44$~TeV~\cite{Acharya:2018ihu}, together with the inclusive $\hbox {J}/\psi$ production~\cite{Acharya:2018jvc} and the transverse momentum spectra and nuclear modification factors of charged particles~\cite{Acharya:2018eaq}.\\ The general upgrade strategy for Run3 is conceived to deal with this challenge with expected \PbPb interaction rates of up to 50 kHz aiming at an integrated luminosity above 10 $\mathrm{nb^{-1}}$. The five TDRs, namely for the new ITS, the TPC GEM-based readout chambers, the Muon Forward Tracker, the Trigger and Readout system, and the Online/Offline computing system were fully approved by the CERN Research Board between 2014 and 2015. In 2017 the transition from the R\&D phase to the construction of prototypes of the final detector elements was successfully completed. 
For the major systems, the final prototype tests and evaluations were performed and the production readiness reviews have been successful, the production started during the 2017 and has been continued throughout 2018. \section{Computing model and R\&D activity in Italy} -The ALICE computing model is still heavily based on Grid distributed computing; since the very beginning, the base principle underlying it has been that every physicist should have equal access to the data and computing resources~\cite{ALICE:2005aa}. According to this principle, the ALICE peculiarity has always been to operate its Grid as a “cloud†of computing resources (both CPU and storage) with no specific role assigned to any given centre, the only difference between them being the Tier level to which they belong. All resources have to be made available to all ALICE members, according only to experiment policy and not on resource physical location, and data is distributed according to network topology and availability of resources and not in pre-defined datasets. Tier1s only peculiarities are their size and the availability of tape custodial storage, which holds a collective second copy of raw data and allows the collaboration to run event reconstruction tasks there. In the ALICE model, though, tape recall is almost never done: all useful data reside on disk, and the custodial tape copy is used only for safekeeping. All data access is done through the xrootd protocol, either through the use of “native†xrootd storage or, like in many large deployments, using xrootd servers in front of a distributed parallel filesystem like GPFS.\\ +The ALICE computing model is still heavily based on Grid distributed computing; since the very beginning, the base principle underlying it has been that every physicist should have equal access to the data and computing resources~\cite{ALICE:2005aa}. 
According to this principle, the ALICE peculiarity has always been to operate its Grid as a “cloud†of computing resources (both CPU and storage) with no specific role assigned to any given center, the only difference between them being the Tier level to which they belong. All resources have to be made available to all ALICE members, according only to experiment policy and not on resource physical location, and data is distributed according to network topology and availability of resources and not in pre-defined datasets. Tier1s only peculiarities are their size and the availability of tape custodial storage, which holds a collective second copy of raw data and allows the collaboration to run event reconstruction tasks there. In the ALICE model, though, tape recall is almost never done: all useful data reside on disk, and the custodial tape copy is used only for safekeeping. All data access is done through the xrootd protocol, either through the use of “native†xrootd storage or, like in many large deployments, using xrootd servers in front of a distributed parallel filesystem like GPFS.\\ The model has not changed significantly for Run2, except for scavenging of some extra computing power by opportunistically use the HLT farm when not needed for data taking. All raw data collected in 2017 has been passed through the calibration stages, including the newly developed track distortion calibration for the TPC, and has been validated by the offline QA process before entering the final reconstruction phase. The ALICE software build system has been extended with additional functionality to validate the AliRoot release candidates with a large set of raw data from different years as well as with various MC generators and configurations. It uses the CERN elastic cloud infrastructure, thus allowing for dynamic provision of resources as needed. The Grid utilization in the accounting period remained high, with no major incidents. 
The CPU/Wall efficiency remained constant, at about 85\% across all Tiers, similar to the previous year. The much higher data rate foreseen for Run3, though, will require a major rethinking of the current computing model in all its components, from the software framework to the algorithms and of the distributed infrastructure. The design of the new computing framework for Run3, started in 2013 and mainly based on the concepts of Online-Offline integration (“\OO\ Projectâ€), has been finalized with the corresponding Technical Design Report~\cite{Buncic:2015ari}: development and implementation phases as well as performance tests are currently ongoing.\\ The Italian share of the ALICE distributed computing effort (currently about 17\%) includes resources both form the Tier1 at CNAF and from the Tier2s in Bari, Catania, Torino and Padova-LNL, plus some extra resources in Trieste. The contribution from the Italian community to the ALICE computing in 2018 has been mainly spread over the usual items, such as the development and maintenance of the (AliRoot) software framework, the management of the computing infrastructure (Tier1 and Tier2 sites) and the participation in the Grid operations of the experiment.\\ @@ -97,12 +98,12 @@ In addition, in the framework of the computing R\&D activities in Italy, the des \section{Role and contribution of the INFN Tier1 at CNAF} -CNAF is a full-fledged ALICE Tier1 centre, having been one of the first to join the production infrastructure years ago. According to the ALICE cloud-like computing model, it has no special assigned task or reference community, but provides computing and storage resources to the whole collaboration, along with offering valuable support staff for the experiment’s computing activities. 
It provides reliable xrootd access both to its disk storage and to the tape infrastructure, through a TSM plugin that was developed by CNAF staff specifically for ALICE use.\\ -As a result of flooding, the CNAF computing centre stopped operation on November 8th, 2017; tape access had been made available again on January 31st 2018, and the ALICE Storage Element was fully recovered by February 23th. The loss of CPU resources during the Tier1 shutdown was partially mitigated by the reallocation of the Tier1 worker nodes located in Bari to the Tier2 Bari queue. At the end of February 2018 the CNAF local farm had been powered again moving from 50 kHS06 gradually to 140 kHS06. In addition, on March 15th 170 kHS06 at CINECA became available thanks to a 500 Gb/s dedicated link. -Since March running at CNAF has been remarkably stable: for example, both the disk and tape storage availabilities have been better than 98\%, ranking CNAF in the top 5 most reliable sites for ALICE. The computing resources provided for ALICE at the CNAF Tier1 centre were fully used along the year, matching and often exceeding the pledged amounts due to access to resources unused by other collaborations. Overall, about 64\% of the ALICE computing activity was Monte Carlo simulation, 14\% raw data processing (which takes place at the Tier0 and Tier1 centres only) and 22\% analysis activities: Fig.~\ref{fig:runjobsusers} illustrates the share among the different activities in the ALICE running job profile along the last 12 months.\\ +CNAF is a full-fledged ALICE Tier1 center, having been one of the first to join the production infrastructure years ago. According to the ALICE cloud-like computing model, it has no special assigned task or reference community, but provides computing and storage resources to the whole collaboration, along with offering valuable support staff for the experiment’s computing activities. 
It provides reliable xrootd access both to its disk storage and to the tape infrastructure, through a TSM plugin that was developed by CNAF staff specifically for ALICE use.\\ +As a result of flooding, the CNAF computing center stopped operation on November 8th, 2017; tape access had been made available again on January 31st 2018, and the ALICE Storage Element was fully recovered by February 23th. The loss of CPU resources during the Tier1 shutdown was partially mitigated by the reallocation of the Tier1 worker nodes located in Bari to the Tier2 Bari queue. At the end of February 2018 the CNAF local farm had been powered again moving from 50 kHS06 gradually to 140 kHS06. In addition, on March 15th 170 kHS06 at CINECA became available thanks to a 500 Gb/s dedicated link. +Since March running at CNAF has been remarkably stable: for example, both the disk and tape storage availabilities have been better than 98\%, ranking CNAF in the top 5 most reliable sites for ALICE. The computing resources provided for ALICE at the CNAF Tier1 center were fully used along the year, matching and often exceeding the pledged amounts due to access to resources unused by other collaborations. 
Overall, about 64\% of the ALICE computing activity was Monte Carlo simulation, 14\% raw data processing (which takes place at the Tier0 and Tier1 centers only) and 22\% analysis activities: Fig.~\ref{fig:runjobsusers} illustrates the share among the different activities in the ALICE running job profile along the last 12 months.\\ \begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{running_jobs_per_users_2018.png} +\includegraphics[width=0.75\textwidth]{running_jobs_per_users_2018} \end{center} \caption{Share among the different ALICE activities in the 2018 running jobs profile.} \label{fig:runjobsusers} @@ -112,16 +113,16 @@ Since April 2018, CNAF deployed the pledged resources corresponding to about 52 The INFN Tier1 has provided about 4,9\% since March 2018 and about 4.20\% along all year of the total CPU hours used by ALICE, ranking second of the ALICE Tier1 sites despite the flooding incident, as shown in Fig. \ref{fig:walltimesharet1}. The cumulated fraction of CPU hours along the whole year for CNAF is about 21\% of the all ALICE Tier1 sites, following only FZK in Karlsruhe (24\%). \begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{wall_time_tier1_2018.png} +\includegraphics[width=0.75\textwidth]{wall_time_tier1_2018} \end{center} -\caption{Ranking of CNAF among ALICE Tier1 centres in 2018.} +\caption{Ranking of CNAF among ALICE Tier1 centers in 2018.} \label{fig:walltimesharet1} \end{figure} This amounts to about 44\% of the total Wall Time of the INFN contribution: it successfully completed nearly 10.5 million jobs, for a total of more than 44 millions CPU hours, the running job profile at CNAF in 2018 is shown in Fig.\ref{fig:rjobsCNAFunov}.\\ Since mid-November a new job submission queue has been made available to ALICE and used to successfully test the job queueing mechanism, the scheduling policy, the priority scheme, the resource monitoring and the resource management with HTCondor at CNAF. 
\begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{running_jobs_CNAF_2018.png} +\includegraphics[width=0.75\textwidth]{running_jobs_CNAF_2018} \end{center} \caption{Running jobs profile at CNAF in 2018.} \label{fig:rjobsCNAFunov} @@ -129,7 +130,7 @@ Since mid-November a new job submission queue has been made available to ALICE a At the end of the last year ALICE was keeping on disk at CNAF more than 4.1 PiB of data in nearly 118 million files, plus more than 10 PiB of raw data on custodial tape storage; the reliability of the storage infrastructure is commendable, even taking into account the extra layer of complexity introduced by the xrootd interfaces. The excellent FS performances allow to analyse data from SE with an average throughput of about 1.6 GB/s and a peak throughput of about 3.0 GB/s, as shown in Fig.\ref{fig:nettrafse}. \begin{figure}[!ht] \begin{center} -\includegraphics[width=0.75\textwidth]{network_traffic_cnaf_se_2018.png} +\includegraphics[width=0.75\textwidth]{network_traffic_cnaf_se_2018} \end{center} \caption{Network traffic on the ALICE xrootd servers at CNAF during 2018.} \label{fig:nettrafse} @@ -154,7 +155,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Adam:2015ptt} \bibitem{Adam:2015ptt} J.~Adam {\it et al.} [ALICE Collaboration], - %``Centrality dependence of the charged-particle multiplicity density at midrapidity in Pb-Pb collisions at $\sqrt{s_{\rm NN}}$ = 5.02 TeV,'' + %``Centrality dependence of the charged-particle multiplicity density at midrapidity in Pb-Pb collisions at $\snn = 5.02$ TeV,'' Phys.\ Rev.\ Lett.\ {\bf 116} (2016) no.22, 222302. % doi:10.1103/PhysRevLett.116.222302 % [arXiv:1512.06104 [nucl-ex]]. 
@@ -164,7 +165,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Adam:2016izf} \bibitem{Adam:2016izf} J.~Adam {\it et al.} [ALICE Collaboration], - %``Anisotropic flow of charged particles in Pb-Pb collisions at $\sqrt{s_{\rm NN}}=5.02$ TeV,'' + %``Anisotropic flow of charged particles in Pb-Pb collisions at $\snn=5.02$ TeV,'' Phys.\ Rev.\ Lett.\ {\bf 116} (2016) no.13, 132302. % doi:10.1103/PhysRevLett.116.132302 % [arXiv:1602.01119 [nucl-ex]]. @@ -181,7 +182,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2017kfy} \bibitem{Acharya:2017kfy} S.~Acharya {\it et al.} [ALICE Collaboration], - %``$\Lambda_{\rm c}^+$ production in pp collisions at $\sqrt{s} = 7$ TeV and in p-Pb collisions at $\sqrt{s_{\rm NN}} = 5.02$ TeV,'' + %``$\Lambda_{\rm c}^+$ production in pp collisions at $\s = 7$ TeV and in p-Pb collisions at $\snn = 5.02$ TeV,'' JHEP {\bf 1804} (2018) 108. % doi:10.1007/JHEP04(2018)108 % [arXiv:1712.09581 [nucl-ex]]. @@ -190,7 +191,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a \bibitem{Acharya:2018yud} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Prompt and non-prompt $\hbox {J}/\psi $ production and nuclear modification at mid-rapidity in p–Pb collisions at $\mathbf{\sqrt{{ s}_{\text {NN}}}= 5.02}$ TeV,'' + %``Prompt and non-prompt $\hbox {J}/\psi $ production and nuclear modification at mid-rapidity in p–Pb collisions at $\snn = 5.02$ TeV,'' Eur.\ Phys.\ J.\ C {\bf 78} (2018) no.6, 466. % doi:10.1140/epjc/s10052-018-5881-2 % [arXiv:1802.00765 [nucl-ex]].
@@ -200,7 +201,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2018uww} \bibitem{Acharya:2018uww} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Measurement of the inclusive J/ $\psi $ polarization at forward rapidity in pp collisions at $\mathbf {\sqrt{s} = 8}$ TeV,'' + %``Measurement of the inclusive J/ $\psi $ polarization at forward rapidity in pp collisions at $\s = 8$ TeV,'' Eur.\ Phys.\ J.\ C {\bf 78} (2018) no.7, 562. % doi:10.1140/epjc/s10052-018-6027-2 % [arXiv:1805.04374 [hep-ex]]. @@ -210,7 +211,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2017qps} \bibitem{Acharya:2017qps} S.~Acharya {\it et al.} [ALICE Collaboration], - %``$D$-meson azimuthal anisotropy in midcentral Pb-Pb collisions at $\mathbf{\sqrt{s_{\rm NN}}=5.02}$ TeV,'' + %``$D$-meson azimuthal anisotropy in midcentral Pb-Pb collisions at $\snn=5.02$ TeV,'' Phys.\ Rev.\ Lett.\ {\bf 120} (2018) no.10, 102301. % doi:10.1103/PhysRevLett.120.102301 % [arXiv:1707.01005 [nucl-ex]]. @@ -220,7 +221,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2017wpf} \bibitem{Acharya:2017wpf} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Measurement of Z$^0$-boson production at large rapidities in Pb-Pb collisions at $\sqrt{s_{\rm NN}}=5.02$ TeV,'' + %``Measurement of Z$^0$-boson production at large rapidities in Pb-Pb collisions at $\snn=5.02$ TeV,'' Phys.\ Lett.\ B {\bf 780} (2018) 372. % doi:10.1016/j.physletb.2018.03.010 % [arXiv:1711.10753 [nucl-ex]].
@@ -230,7 +231,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2018zuq} \bibitem{Acharya:2018zuq} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Anisotropic flow of identified particles in Pb-Pb collisions at $ {\sqrt{s}}_{\mathrm{NN}}=5.02 $ TeV,'' + %``Anisotropic flow of identified particles in Pb-Pb collisions at $\snn=5.02$ TeV,'' JHEP {\bf 1809} (2018) 006. % doi:10.1007/JHEP09(2018)006 % [arXiv:1805.04390 [nucl-ex]]. @@ -240,7 +241,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2018ihu} \bibitem{Acharya:2018ihu} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Anisotropic flow in Xe-Xe collisions at $\mathbf{\sqrt{s_{\rm{NN}}} = 5.44}$ TeV,'' + %``Anisotropic flow in Xe-Xe collisions at $\snn = 5.44$ TeV,'' Phys.\ Lett.\ B {\bf 784} (2018) 82. % doi:10.1016/j.physletb.2018.06.059 % [arXiv:1805.01832 [nucl-ex]]. @@ -250,7 +251,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2018jvc} \bibitem{Acharya:2018jvc} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Inclusive J/$\psi$ production in Xe–Xe collisions at $\sqrt{s_{\rm NN}}$ = 5.44 TeV,'' + %``Inclusive J/$\psi$ production in Xe–Xe collisions at $\snn = 5.44$ TeV,'' Phys.\ Lett.\ B {\bf 785} (2018) 419. % doi:10.1016/j.physletb.2018.08.047 % [arXiv:1805.04383 [nucl-ex]]. @@ -260,7 +261,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\cite{Acharya:2018eaq} \bibitem{Acharya:2018eaq} S.~Acharya {\it et al.} [ALICE Collaboration], - %``Transverse momentum spectra and nuclear modification factors of charged particles in Xe-Xe collisions at $\sqrt{s_{\rm NN}}$ = 5.44 TeV,'' + %``Transverse momentum spectra and nuclear modification factors of charged particles in Xe-Xe collisions at $\snn = 5.44$ TeV,'' Phys.\ Lett.\ B {\bf 788} (2019) 166.
% doi:10.1016/j.physletb.2018.10.052 % [arXiv:1805.04399 [nucl-ex]]. @@ -295,7 +296,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\bibitem{Abelev:2014dsa} % B.~B.~Abelev {\it et al.} [ALICE Collaboration], % %``Transverse momentum dependence of inclusive primary charged-particle production in p-Pb -% collisions at $\sqrt{s_\mathrm{{NN}}}=5.02~\text {TeV}$,'' +% collisions at $\snn=5.02~\text {TeV}$,'' % Eur.\ Phys.\ J.\ C {\bf 74} (2014) no.9, 3054. % %doi:10.1140/epjc/s10052-014-3054-5 % %[arXiv:1405.2737 [nucl-ex]]. @@ -306,7 +307,7 @@ Also network connectivity has always been reliable; the 100 Gb/s of the LHCOPN a %\bibitem{Abelev:2013haa} % B.~B.~Abelev {\it et al.} [ALICE Collaboration], % %``Multiplicity Dependence of Pion, Kaon, Proton and Lambda Production in p-Pb Collisions -% at $\sqrt{s_{NN}}$ = 5.02 TeV,'' +% at $\snn = 5.02$ TeV,'' % Phys.\ Lett.\ B {\bf 728} (2014) 25. % %doi:10.1016/j.physletb.2013.11.020 % %[arXiv:1307.6796 [nucl-ex]]. diff --git a/contributions/ams/AMS-report-2019.tex b/contributions/ams/AMS-report-2019.tex index 19e770646633e67dc7963bfaa371e58a31d6885f..846e4a20ad0e1755b611293af0dd96bcfd15b8db 100644 --- a/contributions/ams/AMS-report-2019.tex +++ b/contributions/ams/AMS-report-2019.tex @@ -8,8 +8,8 @@ \author{B. Bertucci$^{1,2}$, M. Duranti$^2$, V. Formato$^{2,\ast}$, D. Spiga$^{2}$} -\address{$^1$ Universit\`a di Perugia, I-06100 Perugia, Italy} -\address{$^2$ INFN, Sezione Perugia, I-06100 Perugia, Italy} +\address{$^1$ Universit\`a di Perugia, Perugia, IT} +\address{$^2$ INFN Sezione di Perugia, Perugia, IT} \address{AMS experiment \url{http://ams.cern.ch}, \url{http://www.ams02.org}, \url{http://www.pg.infn.it/ams/}} @@ -210,13 +210,13 @@ Carlo production and data analysis. Those activities have produced four publicat \subsection*{Data analysis} -Different analysis are carried on by the Italian collaboration. 
In 2018, the CNAF resources for user analysis have been devoted to several different topic: the update, with more statistics, of the electron and positron analyses (they resulted in two PRL publications in 2019 \cite{Aguilar:2019pos,Aguilar:2019ele}), the measurement of the light nuclei abundances (that resulted in the PRL publications \cite{Aguilar:2018keu,Aguilar:2018njt}) and the study of their time variation as well as the study of the proton and helium flux as a function of time, the deuteron abundance measurement and the antideuteron search analysis. +Different analyses are carried out by the Italian collaboration. In 2018, the CNAF resources for user analysis have been devoted to several different topics: the update, with more statistics, of the electron and positron analyses (they resulted in two PRL publications in 2019 \cite{Aguilar:2019pos,Aguilar:2019ele}), the measurement of the light nuclei abundances (that resulted in the PRL publications \cite{Aguilar:2018keu,Aguilar:2018njt}) and the study of their time variation as well as the study of the proton and helium flux as a function of time, the deuteron abundance measurement and the antideuteron search analysis. %The disk resources pledged in 2018, $\sim$ 2 PB, were mostly devoted to the pass7 data sample ($\sim$ 1 PB), MC data sample ($\sim$ 400 TB), selected data streams ($\sim$ 100 TB of pre-selected data used for common electron/positron, antiproton, antideuteron, proton and ion analysis) and scratch area for users. \subsection*{Research and Development} \label{ReD} As mentioned above, during 2017 AMS started evaluating the technical feasibility of integrating also cloud resources (possibly seamlessly) in order to primarily benefit of external computing resources, meant as opportunistic resources. The architectural model foreseen is that all AMS data are and will be hosted at CNAF.
Possible cloud compute resources should be able to remotely access data (might be caching locally for the sake of the I/O optimization) and produced data (namely output files) should be moved into the CNAF storage.\\ -AMS work-flow has been successfully integrated in DODAS (Dynamic On Demand Analysis Service, a thematic service funded by the EOSC-hub European project) and the work-flow has been validated and consolidated during 2018. The success of the validation tests performed over HelixNebula Science Cloud provided resources and over Google Cloud INFN grant motivate further exploitation as well as evolution of the strategy. In total in 2018 the Italian collaboration benefited of more than 4\textit{\,k\,HS06\,yr} of opportunistic resources, that represent $\sim$ 20\% of the ones obtained from CNAF.\\ +AMS work-flow has been successfully integrated in DODAS (Dynamic On Demand Analysis Service, a thematic service funded by the EOSC-hub European project) and it has been validated and consolidated during 2018. The success of the validation tests performed over HelixNebula Science Cloud provided resources and over Google Cloud INFN grant motivate further exploitation as well as evolution of the strategy. In total in 2018 the Italian collaboration benefited of more than 4\textit{\,k\,HS06\,yr} of opportunistic resources, that represent $\sim$ 20\% of the ones obtained from CNAF.\\ More in detail during the 2019 the plan is to consolidate the usage of the INFN on-premises cloud providers, namely Cloud@ReCaS Bari and Cloud@CNAF in the context of DODAS. Consolidation by means of improvement in managing I/O by using emerging solution for data caching as well as starting exploiting geographically distributed clusters.\\ The latter is about exploiting DODAS based solutions to create a single logical cluster running over any available resource provider. The desired solution is to allow user submitting jobs from e.g. 
CNAF provided User Interface to a single queue and allow dynamic clusters to fetch payloads in a secure and transparent (to the end user) way.\\ From a technical perspective the distributed cluster implementation will be based on HTCondor technology which is a important strategic aspect because of we expect this will allow, later on, a completely seamless integration within the batch system of the CNAF Tier 1. diff --git a/contributions/atlas/atlas.tex b/contributions/atlas/atlas.tex index 4d57ed330ff53d4f288afc87a2c09bacadaa36b7..b3e25bd46bdd5fc56783cedc25001b906bb44c37 100644 --- a/contributions/atlas/atlas.tex +++ b/contributions/atlas/atlas.tex @@ -1,24 +1,25 @@ \documentclass[a4paper]{jpconf} \usepackage{graphicx} \begin{document} -\title{The ATLAS Experiment at the INFN CNAF Tier-1} +\title{The ATLAS Experiment at the INFN CNAF Tier 1} -\author{Alessandro De Salvo$^1$, Lorenzo Rinaldi$^2$} +\author{A. De Salvo$^1$, L. Rinaldi$^2$} -\address{$^1$ INFN Sezione di Roma-1, piazzale Aldo Moro 2, 00185 Roma, Italy,\\ $^2$ Universit\`a di Bologna e INFN, via Irnerio 46, 40126 Bologna, Italy} +\address{$^1$ INFN Sezione di Roma-1, Roma, IT} +\address{$^2$ Universit\`a di Bologna e INFN Sezione di Bologna, Bologna, IT} \ead{alessandro.desalvo@roma1.infn.it, lorenzo.rinaldi@bo.infn.it} \begin{abstract} -The ATLAS experiment at LHC was fully operating in 2017. In this contribution we describe the ATLAS computing activities performed in the Italian sites of the Collaboration, and in particular the utilisation of the CNAF Tier-1. +The ATLAS experiment at LHC was fully operating in 2017. In this contribution we describe the ATLAS computing activities performed in the Italian sites of the Collaboration, and in particular the utilisation of the CNAF Tier 1. \end{abstract} \section{Introduction} -ATLAS \cite{ATLAS-det} is one of two general-purpose detectors at the Large Hadron Collider (LHC). 
It investigates a wide range of physics, from the search for the Higgs boson and standard model studies to extra dimensions and particles that could make up dark matter. Beams of particles from the LHC collide at the centre of the ATLAS detector making collision debris in the form of new particles, which fly out from the collision point in all directions. Six different detecting subsystems arranged in layers around the collision point record the paths, momentum, and energy of the particles, allowing them to be individually identified. A huge magnet system bends the paths of charged particles so that their momenta can be measured. The interactions in the ATLAS detectors create an enormous flow of data. To digest the data, ATLAS uses an advanced trigger system to tell the detector which events to record and which to ignore. Complex data-acquisition and computing systems are then used to analyse the collision events recorded. At 46 m long, 25 m high and 25 m wide, the 7000-tons ATLAS detector is the largest volume particle detector ever built. It sits in a cavern 100 m below ground near the main CERN site, close to the village of Meyrin in Switzerland. +ATLAS \cite{ATLAS-det} is one of two general-purpose detectors at the Large Hadron Collider (LHC). It investigates a wide range of physics, from the search for the Higgs boson and standard model studies to extra dimensions and particles that could make up dark matter. Beams of particles from the LHC collide at the center of the ATLAS detector making collision debris in the form of new particles, which fly out from the collision point in all directions. Six different detecting subsystems arranged in layers around the collision point record the paths, momentum, and energy of the particles, allowing them to be individually identified. A huge magnet system bends the paths of charged particles so that their momenta can be measured. The interactions in the ATLAS detectors create an enormous flow of data. 
To digest the data, ATLAS uses an advanced trigger system to tell the detector which events to record and which to ignore. Complex data-acquisition and computing systems are then used to analyse the collision events recorded. At 46 m long, 25 m high and 25 m wide, the 7000-tons ATLAS detector is the largest volume particle detector ever built. It sits in a cavern 100 m below ground near the main CERN site, close to the village of Meyrin in Switzerland. More than 3000 scientists from 174 institutes in 38 countries work on the ATLAS experiment. -ATLAS has been taking data from 2010 to 2012, at center of mass energies of 7 and 8 TeV, collecting about 5 and 20 fb$^{-1}$ of integrated luminosity, respectively. During the complete Run-2 phase (2015-2018) ATLAS collected and registered at the Tier-0 147 fb$^{-1}$ of integrated luminosity at center of mass energies of 13 TeV. +ATLAS has been taking data from 2010 to 2012, at center of mass energies of 7 and 8 TeV, collecting about 5 and 20 fb$^{-1}$ of integrated luminosity, respectively. During the complete Run-2 phase (2015-2018) ATLAS collected and registered at the Tier 0 147 fb$^{-1}$ of integrated luminosity at center of mass energies of 13 TeV. The experiment has been designed to look for New Physics over a very large set of final states and signatures, and for precision measurements of known Standard Model (SM) processes. Its most notable result up to now has been the discovery of a new resonance at a mass of about 125 GeV \cite{ATLAS higgs}, followed by the measurement of its properties (mass, production cross sections in various channels and couplings). These measurements have confirmed the compatibility of the new resonance with the Higgs boson, foreseen by the SM but never observed before. 
@@ -30,14 +31,14 @@ The experiment has been designed to look for New Physics over a very large set o The ATLAS Computing System \cite{ATLAS-cm} is responsible for the provision of the software framework and services, the data management system, user-support services, and the world-wide data access and job-submission system. The development of detector-specific algorithmic code for simulation, calibration, alignment, trigger and reconstruction is under the responsibility of the detector projects, but the Software and Computing Project plans and coordinates these activities across detector boundaries. In particular, a significant effort has been made to ensure that relevant parts of the “offline†framework and event-reconstruction code can be used in the High Level Trigger. Similarly, close cooperation with Physics Coordination and the Combined Performance groups ensures the smooth development of global event-reconstruction code and of software tools for physics analysis. \subsection{The ATLAS Computing Model} -The ATLAS Computing Model embraces the Grid paradigm and a high degree of decentralisation and sharing of computing resources. The required level of computing resources means that off-site facilities are vital to the operation of ATLAS in a way that was not the case for previous CERN-based experiments. The primary event processing occurs at CERN in a Tier-0 Facility. The RAW data is archived at CERN and copied (along with the primary processed data) to the Tier-1 facilities around the world. These facilities archive the raw data, provide the reprocessing capacity, provide access to the various processed versions, and allow scheduled analysis of the processed data by physics analysis groups. Derived datasets produced by the physics groups are copied to the Tier-2 facilities for further analysis. The Tier-2 facilities also provide the simulation capacity for the experiment, with the simulated data housed at Tier-1s. 
In addition, Tier-2 centres provide analysis facilities, and some provide the capacity to produce calibrations based on processing raw data. A CERN Analysis Facility provides an additional analysis capacity, with an important role in the calibration and algorithmic development work. ATLAS has adopted an object-oriented approach to software, based primarily on the C++ programming language, but with some components implemented using FORTRAN and Java. A component-based model has been adopted, whereby applications are built up from collections of plug-compatible components based on a variety of configuration files. This capability is supported by a common framework that provides common data-processing support. This approach results in great flexibility in meeting both the basic processing needs of the experiment, but also for responding to changing requirements throughout its lifetime. The heavy use of abstract interfaces allows for different implementations to be provided, supporting different persistency technologies, or optimized for the offline or high-level trigger environments. +The ATLAS Computing Model embraces the Grid paradigm and a high degree of decentralisation and sharing of computing resources. The required level of computing resources means that off-site facilities are vital to the operation of ATLAS in a way that was not the case for previous CERN-based experiments. The primary event processing occurs at CERN in a Tier 0 Facility. The RAW data is archived at CERN and copied (along with the primary processed data) to the Tier 1 facilities around the world. These facilities archive the raw data, provide the reprocessing capacity, provide access to the various processed versions, and allow scheduled analysis of the processed data by physics analysis groups. Derived datasets produced by the physics groups are copied to the Tier 2 facilities for further analysis. 
The Tier 2 facilities also provide the simulation capacity for the experiment, with the simulated data housed at Tier 1 centers. In addition, Tier 2 centers provide analysis facilities, and some provide the capacity to produce calibrations based on processing raw data. A CERN Analysis Facility provides an additional analysis capacity, with an important role in the calibration and algorithmic development work. ATLAS has adopted an object-oriented approach to software, based primarily on the C++ programming language, but with some components implemented using FORTRAN and Java. A component-based model has been adopted, whereby applications are built up from collections of plug-compatible components based on a variety of configuration files. This capability is supported by a common framework that provides common data-processing support. This approach results in great flexibility in meeting both the basic processing needs of the experiment, but also for responding to changing requirements throughout its lifetime. The heavy use of abstract interfaces allows for different implementations to be provided, supporting different persistency technologies, or optimized for the offline or high-level trigger environments. The Athena framework is an enhanced version of the Gaudi framework that was originally developed by the LHCb experiment, but is now a common ATLAS-LHCb project. Major design principles are the clear separation of data and algorithms, and between transient (in-memory) and persistent (in-file) data. All levels of processing of ATLAS data, from high-level trigger to event simulation, reconstruction and analysis, take place within the Athena framework; in this way it is easier for code developers and users to test and run algorithmic code, with the assurance that all geometry and conditions data will be the same for all types of applications ( simulation, reconstruction, analysis, visualization). 
One of the principal challenges for ATLAS computing is to develop and operate a data storage and management infrastructure able to meet the demands of a yearly data volume of O(10PB) utilized by data processing and analysis activities spread around the world. The ATLAS Computing Model establishes the environment and operational requirements that ATLAS data-handling systems must support and provides the primary guidance for the development of the data management systems. The ATLAS Databases and Data Management Project (DB Project) leads and coordinates ATLAS activities in these areas, with a scope encompassing technical data bases (detector production, installation and survey data), detector geometry, online/TDAQ databases, conditions databases (online and offline), event data, offline processing configuration and bookkeeping, distributed data management, and distributed database and data management services. The project is responsible for ensuring the coherent development, integration and operational capability of the distributed database and data management software and infrastructure for ATLAS across these areas. -The ATLAS Computing Model defines the distribution of raw and processed data to Tier-1 and Tier-2 centres, so as to be able to exploit fully the computing resources that are made available to the Collaboration. Additional computing resources are available for data processing and analysis at Tier-3 centres and other computing facilities to which ATLAS may have access. A complex set of tools and distributed services, enabling the automatic distribution and processing of the large amounts of data, has been developed and deployed by ATLAS in cooperation with the LHC Computing Grid (LCG) Project and with the middleware providers of the three large Grid infrastructures we use: EGI, OSG and NorduGrid. The tools are designed in a flexible way, in order to have the possibility to extend them to use other types of Grid middleware in the future. 
-The main computing operations that ATLAS have to run comprise the preparation, distribution and validation of ATLAS software, and the computing and data management operations run centrally on Tier-0, Tier-1s and Tier-2s. The ATLAS Virtual Organization allows production and analysis users to run jobs and access data at remote sites using the ATLAS-developed Grid tools. -The Computing Model, together with the knowledge of the resources needed to store and process each ATLAS event, gives rise to estimates of required resources that can be used to design and set up the various facilities. It is not assumed that all Tier-1s or Tier-2s are of the same size; however, in order to ensure a smooth operation of the Computing Model, all Tier-1s usually have broadly similar proportions of disk, tape and CPU, and similarly for the Tier-2s. +The ATLAS Computing Model defines the distribution of raw and processed data to Tier 1 and Tier 2 centers, so as to be able to exploit fully the computing resources that are made available to the Collaboration. Additional computing resources are available for data processing and analysis at Tier 3 centers and other computing facilities to which ATLAS may have access. A complex set of tools and distributed services, enabling the automatic distribution and processing of the large amounts of data, has been developed and deployed by ATLAS in cooperation with the LHC Computing Grid (LCG) Project and with the middleware providers of the three large Grid infrastructures we use: EGI, OSG and NorduGrid. The tools are designed in a flexible way, in order to have the possibility to extend them to use other types of Grid middleware in the future. +The main computing operations that ATLAS have to run comprise the preparation, distribution and validation of ATLAS software, and the computing and data management operations run centrally on Tier 0, Tier 1 sites and Tier 2 sites. 
The ATLAS Virtual Organization allows production and analysis users to run jobs and access data at remote sites using the ATLAS-developed Grid tools. +The Computing Model, together with the knowledge of the resources needed to store and process each ATLAS event, gives rise to estimates of required resources that can be used to design and set up the various facilities. It is not assumed that all Tier 1 sites or Tier 2 sites are of the same size; however, in order to ensure a smooth operation of the Computing Model, all Tier 1 centers usually have broadly similar proportions of disk, tape and CPU, and similarly for the Tier 2 sites. The organization of the ATLAS Software and Computing Project reflects all areas of activity within the project itself. Strong high-level links are established with other parts of the ATLAS organization, such as the TDAQ Project and Physics Coordination, through cross-representation in the respective steering boards. The Computing Management Board, and in particular the Planning Officer, acts to make sure that software and computing developments take place coherently across sub-systems and that the project as a whole meets its milestones. The International Computing Board assures the information flow between the ATLAS Software and Computing Project and the national resources and their Funding Agencies. @@ -45,22 +46,22 @@ Board, and in particular the Planning Officer, acts to make sure that software a \section{The role of the Italian Computing facilities in the global ATLAS Computing} -Italy provides Tier-1, Tier-2 and Tier-3 facilities to the ATLAS collaboration. The Tier-1, located at CNAF, Bologna, is the main centre, also referred as “regional†centre. The Tier-2 centres are distributed in different areas of Italy, namely in Frascati, Napoli, Milano and Roma. All 4 Tier-2 sites are considered as Direct Tier-2 (T2D), meaning that they have an higher importance with respect to normal Tier-2s and can have primary data too. 
They are also considered satellites of the Tier-1, also identified as nucleus. The total of the Tier-2 sites corresponds to more than the total ATLAS size at the Tier-1, for what concerns disk and CPUs; tape is not available in the Tier-2 sites. A third category of sites is the so-called Tier-3 centres. Those are smaller centres, scattered in different places in Italy, that nevertheless contributes in a consistent way to the overall computing power, in terms of disk and CPUs. The overall size of the Tier-3 sites corresponds roughly to the size of a Tier-2 site. The Tier-1 and Tier-2 sites have pledged resources, while the Tier-3 sites do not have any pledge resource available. -In terms of pledged resources, Italy contributes to the ATLAS computing as 9\% of both CPU and disk for the Tier-1. The share of the Tier-2 facilities corresponds to 7\% of disk and 9\% of CPU of the whole ATLAS computing infrastructure. The Italian Tier-1, together with the other Italian centres, provides both resources and expertise to the ATLAS computing community, and manages the so-called Italian Cloud of computing. Since 2015 the Italian Cloud does not only include Italian sites, but also Tier-3 sites of other countries, namely South Africa and Greece. -The computing resources, in terms of disk, tape and CPU, available in the Tier-1 at CNAF have been very important for all kind of activities, including event generation, simulation, reconstruction, reprocessing and analysis, for both MonteCarlo and real data. Its major contribution has been the data reprocessing, since this is a very I/O and memory intense operation, normally executed only in Tier-1 centres. In this sense CNAF has played a fundamental role for the fine measurement of the Higgs [3] properties in 2018 and other analysis. 
The Italian centres, including CNAF, have been very active not only in the operation side, but contributed a lot in various aspect of the Computing of the ATLAS experiment, in particular for what concerns the network, the storage systems, the storage federations and the monitoring tools. The Tier-1 at CNAF has been very important for the ATLAS community in 2018, for some specific activities: +Italy provides Tier 1, Tier 2 and Tier 3 facilities to the ATLAS collaboration. The Tier 1, located at CNAF, Bologna, is the main center, also referred to as ``regional'' center. The Tier 2 centers are distributed in different areas of Italy, namely in Frascati, Napoli, Milano and Roma. All 4 Tier 2 sites are considered as Direct Tier 2 (T2D), meaning that they have a higher importance with respect to normal Tier 2s and can have primary data too. They are also considered satellites of the Tier 1, also identified as nucleus. The total of the Tier 2 sites corresponds to more than the total ATLAS size at the Tier 1, for what concerns disk and CPUs; tape is not available in the Tier 2 sites. A third category of sites is the so-called Tier 3 centers. Those are smaller centers, scattered in different places in Italy, that nevertheless contribute in a consistent way to the overall computing power, in terms of disk and CPUs. The overall size of the Tier 3 sites corresponds roughly to the size of a Tier 2 site. The Tier 1 and Tier 2 sites have pledged resources, while the Tier 3 sites do not have any pledge resource available. +In terms of pledged resources, Italy contributes to the ATLAS computing as 9\% of both CPU and disk for the Tier 1. The share of the Tier 2 facilities corresponds to 7\% of disk and 9\% of CPU of the whole ATLAS computing infrastructure. The Italian Tier 1, together with the other Italian centers, provides both resources and expertise to the ATLAS computing community, and manages the so-called Italian Cloud of computing.
Since 2015 the Italian Cloud does not only include Italian sites, but also Tier 3 sites of other countries, namely South Africa and Greece. +The computing resources, in terms of disk, tape and CPU, available in the Tier 1 at CNAF have been very important for all kinds of activities, including event generation, simulation, reconstruction, reprocessing and analysis, for both MonteCarlo and real data. Its major contribution has been the data reprocessing, since this is a very I/O- and memory-intensive operation, normally executed only in Tier 1 centers. In this sense CNAF has played a fundamental role for the fine measurement of the Higgs [3] properties in 2018 and other analyses. The Italian centers, including CNAF, have been very active not only in the operation side, but contributed a lot in various aspects of the Computing of the ATLAS experiment, in particular for what concerns the network, the storage systems, the storage federations and the monitoring tools. The Tier 1 at CNAF has been very important for the ATLAS community in 2018, for some specific activities: \begin{itemize} \item improvements on the WebDAV/HTTPS access for StoRM, in order to be used as main renaming method for the ATLAS files in StoRM and for http federation purposes; \item improvements of the dynamic model of the multi-core resources operated via the LSF resource management system and simplification of the PanDA queues, using the Harvester service to mediate the control and information flow between PanDA and the resources. -\item network troubleshooting via the Perfsonar-PS network monitoring system, used for the LHCONE overlay network, together with the other Tier-1 and Tier-2 sites; +\item network troubleshooting via the Perfsonar-PS network monitoring system, used for the LHCONE overlay network, together with the other Tier 1 and Tier 2 sites; \item planning, readiness testing and implementation of the HTCondor batch system for the farming resources management.
\end{itemize} \section{Main achievements of ATLAS Computing centers in Italy} -The Italian Tier-2 Federation runs all the ATLAS computing activities in the Italian cloud supporting the operations at CNAF, the Italian Tier-1 centre, and the Milano, Napoli, Roma1 and Frascati Tier-2 sites. This insures an optimized use of the resources and a fair and efficient data access. The computing activities of the ATLAS collaboration have been constantly carried out over the whole 2018, in order to analyse the data of the Run-2 and produce the Monte Carlo data needed for the 2018 run. +The Italian Tier 2 Federation runs all the ATLAS computing activities in the Italian cloud supporting the operations at CNAF, the Italian Tier 1 center, and the Milano, Napoli, Roma1 and Frascati Tier 2 sites. This ensures an optimized use of the resources and a fair and efficient data access. The computing activities of the ATLAS collaboration have been constantly carried out over the whole 2018, in order to analyse the data of the Run-2 and produce the Monte Carlo data needed for the 2018 run. -The LHC data taking started in April 2018 and, until the end of the operation in December 2018, all the Italian sites, the CNAF Tier-1 and the four Tier-2s, have been involved in all the computing operations of the collaboration: data reconstruction, Monte Carlo simulation, user and group analysis and data transfer among all the sites. Besides these activities, the Italian centers have contributed to the upgrade of the Computing Model both from the testing side and the development of specific working groups. ATLAS collected and registered at the Tier-0 ~60.6 fb$^{-1}$ and ~25 PB of raw and derived data, while the cumulative data volume distributed in all the data centers in the grid was of the order of ~80 PB. The data has been replicated with an efficiency of 100\% and an average throughput of the order of ~13 GB/s during the data taking period, with peaks above 25 GB/s.
For just Italy, the average throughput was of the order of 800 MB/s with peaks above 2GB/s. The data replication speed from Tier-0 to the Tier-2s has been quite fast with a transfer time lower than 4 hours. The average number of simultaneous jobs running on the grid has been of about 110k for production (simulation and reconstruction) and data analysis, with peaks over 150k, with an average CPU efficiency up to more than 80\%. The use of the grid for analysis has been stable on ~26k simultaneous jobs, with peaks around the conferences’ periods to over 40k, showing the reliability and effectiveness of the use of grid tools for data analysis. +The LHC data taking started in April 2018 and, until the end of the operation in December 2018, all the Italian sites, the CNAF Tier 1 and the four Tier 2 sites, have been involved in all the computing operations of the collaboration: data reconstruction, Monte Carlo simulation, user and group analysis and data transfer among all the sites. Besides these activities, the Italian centers have contributed to the upgrade of the Computing Model both from the testing side and the development of specific working groups. ATLAS collected and registered at the Tier 0 ~60.6 fb$^{-1}$ and ~25 PB of raw and derived data, while the cumulative data volume distributed in all the data centers in the grid was of the order of ~80 PB. The data has been replicated with an efficiency of 100\% and an average throughput of the order of ~13 GB/s during the data taking period, with peaks above 25 GB/s. For just Italy, the average throughput was of the order of 800 MB/s with peaks above 2GB/s. The data replication speed from Tier 0 to the Tier 2 sites has been quite fast with a transfer time lower than 4 hours. The average number of simultaneous jobs running on the grid has been of about 110k for production (simulation and reconstruction) and data analysis, with peaks over 150k, with an average CPU efficiency up to more than 80\%. 
The use of the grid for analysis has been stable at ~26k simultaneous jobs, with peaks around the conferences' periods to over 40k, showing the reliability and effectiveness of the use of grid tools for data analysis. The Italian sites contributed to the development of the Xrootd and http/webdav federation. In the latter case the access to the storage resources is managed using the http/webdav protocol, in collaboration with the CERN DPM team, the Belle2 experiment, the Canadian Corporate Cloud and the RAL (UK) site. The purpose is to build a reliable storage federation, alternative to the Xrootd one, to access physics data both on the grid and on cloud storage infrastructures (like Amazon S3, Microsoft Azure, etc). The Italian community is particularly involved in this project and the first results have been presented to the WLCG collaboration. @@ -68,7 +69,7 @@ The Italian community also contributes to develop new tools for distributed data The contribution of the Italian sites to the computing activities in terms of processed jobs and data recorded has been of about 9\%, corresponding to the order of the resource pledged to the collaboration, with very good performance in term of availability, reliability and efficiency. All the sites are always in the top positions in the ranking of the collaboration sites. -Besides the Tier-1 and Tier-2s, in 2018 also the Tier-3s gave a significant contribution to the Italian physicists community for the data analysis. The Tier-3s are local farms dedicated to the interactive data analysis, the last step of the analysis workflow, and to the grid analysis over small data sample. Several italian groups set up a farm for such a purpose in their universities and, after a testing and validation process performed by the distributed computing team of the collaboration, all have been recognized as official Tier-3s of the collaboration.
+Besides the Tier 1 and Tier 2 sites, in 2018 also the Tier 3 sites gave a significant contribution to the Italian physicists' community for the data analysis. The Tier 3 centers are local farms dedicated to the interactive data analysis, the last step of the analysis workflow, and to the grid analysis over small data samples. Several Italian groups set up a farm for such a purpose in their universities and, after a testing and validation process performed by the distributed computing team of the collaboration, all have been recognized as official Tier 3s of the collaboration. @@ -76,7 +77,7 @@ Besides the Tier-1 and Tier-2s, in 2018 also the Tier-3s gave a significant cont -The ATLAS Computing Model was designed to have a sufficient redundancy of the available resources in order to tackle emergency situations like the flooding occurred on November 9th 2017 at CNAF. Thanks to the huge effort of the whole community of the CNAF, the operativity of the data centre restarted gradually from the second half of February 2018. A continuous interaction between ATLAS distributed computing community and CNAF people was needed to bring the computing operation fully back to normality. The deep collaboration was very successful and after one month the site was almost fully operational and the ATLAS data management and processing activities were running smoothly again. Eventually, the overall impact of the incident was limited enough, mainly thanks to the relatively quick recovery of the CNAF data center and to the robustness of the computing model. +The ATLAS Computing Model was designed to have a sufficient redundancy of the available resources in order to tackle emergency situations like the flooding that occurred on November 9th 2017 at CNAF. Thanks to the huge effort of the whole community of the CNAF, the operation of the data center restarted gradually from the second half of February 2018.
A continuous interaction between ATLAS distributed computing community and CNAF people was needed to bring the computing operation fully back to normality. The deep collaboration was very successful and after one month the site was almost fully operational and the ATLAS data management and processing activities were running smoothly again. Eventually, the overall impact of the incident was limited enough, mainly thanks to the relatively quick recovery of the CNAF data center and to the robustness of the computing model. diff --git a/contributions/audit/Audit-2018.tex b/contributions/audit/Audit-2018.tex index 6696b6e6581b9c5608847ef2cc6e1cf7bf38b3e5..2af2956ddc80efabf92cea534412e978fa104041 100644 --- a/contributions/audit/Audit-2018.tex +++ b/contributions/audit/Audit-2018.tex @@ -3,9 +3,9 @@ \begin{document} \title{Internal Auditing INFN for GDPR compliance} -\author{V.~Ciaschini, P.~Belluomo} -\address{INFN CNAF, Viale Berti Pichat 6/2, 40127, Bologna, Italy} -\address{INFN sezione di Catania, Via Santa Sofia 64, 95123, Catania, Italy} +\author{V. Ciaschini$^1$, P. 
Belluomo$^2$} +\address{$^1$ INFN-CNAF, Bologna, IT} +\address{$^2$ INFN Sezione di Catania, Catania, IT} \begin{abstract} With the General Data Protection Regulation (GDPR) coming into diff --git a/contributions/borexino/Borexino_CNAFreport2018.tex b/contributions/borexino/Borexino_CNAFreport2018.tex index 4a48a49d16b15949d418f78acb50c5f0653fe380..49ddb71fc51b9a48dd4ea7df4792818c92229de6 100644 --- a/contributions/borexino/Borexino_CNAFreport2018.tex +++ b/contributions/borexino/Borexino_CNAFreport2018.tex @@ -3,8 +3,8 @@ \begin{document} \title{The Borexino experiment at the INFN- CNAF} -\author{Alessandra Carlotta Re\\ \small{on behalf of the BOREXINO collaboration}} -\address{Universit\`{a} degli Studi e INFN di Milano, via Celoria 16, 20133 Milano, Italy} +\author{Alessandra Carlotta Re$^1$\\ \small{on behalf of the BOREXINO collaboration}} +\address{$^1$ Universit\`{a} degli Studi di Milano e INFN Sezione di Milano, Milano, IT} \ead{alessandra.re@mi.infn.it} \begin{abstract} %OK diff --git a/contributions/chnet/ArchDiagram.png b/contributions/chnet/ArchDiagram.png new file mode 100644 index 0000000000000000000000000000000000000000..5719c587292599abde3f4d453bfacd9bdd1a30de Binary files /dev/null and b/contributions/chnet/ArchDiagram.png differ diff --git a/contributions/chnet/dhlab.tex b/contributions/chnet/dhlab.tex new file mode 100644 index 0000000000000000000000000000000000000000..93d184d951b889cea791539c466a5f251905b39a --- /dev/null +++ b/contributions/chnet/dhlab.tex @@ -0,0 +1,212 @@ +\documentclass[a4paper]{jpconf} +\usepackage[T1]{fontenc} +\usepackage[utf8]{inputenc} +\usepackage{graphicx} +\usepackage{url} + +\begin{document} + +\title{DHLab: a digital library for the INFN Cultural Heritage Network} + +\author{F. Proietti$^1$, L. dell'Agnello$^1$, F. 
Giacomini$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} +\ead{fabio.proietti@cnaf.infn.it} + +\begin{abstract} + + DHLab, as part of the Cultural Heritage Network (CHNet) promoted by + INFN, is a cloud-based environment to process, visualise and analyse + data acquired from members of the network and that will be provided + to technical and non-technical users. DHLab is under development and + currently its main features are a cloud service to upload and manage + the data, a form to assign metadata to uploaded datasets and a + service used to analyze data obtained from XRF measurements. + +\end{abstract} + +\section{Introduction} + +CHNet\footnote{http://chnet.infn.it/} is a network composed of several +INFN teams who devote their expertise in physics research to the study +and diagnostics of Cultural Heritage. By using their existing instruments, +developed for Nuclear Physics, or even by building new ones, +INFN laboratories started to address the needs of archaeologists, +historians, art historians, restorers and conservators. This unified +knowledge can provide useful indications about the correct procedures +to be applied for restoration or conservation, and could be important to +date or verify, for example, the authenticity of an artwork or study +the provenance of raw material in order to retrace ancient trade +routes. In this context the purpose of the DHLab is to host all the +data acquired by the CHNet laboratories, together with the +descriptions and annotations added by humanists. + +\section{Architecture} + +The infrastructure system, shown in figure~\ref{fig:architecture}, +follows a cloud-based model and can be divided into multiple modular +frontends, providing the interface towards the clients, and a +monolithic backend service.
+ +\begin{figure}[ht] + \begin{center} + \includegraphics[scale=.4]{ArchDiagram.png} + \caption{\label{fig:architecture}High level overview of DHLab + architecture} + \end{center} +\end{figure} + +The frontend includes three main blocks: a cloud service, a metadata +form and an application service. Of these, the metadata form, used to +fill details about a work or an analysis (see +section~\ref{sec:metadata-form}), is usable also while being offline; +the requirement addresses the use case of an operator who, while +disconnected from the network, needs to fill the metadata, saving them +as a file on the local machine. The same requirement may be at least +partly satisfied also for the application services. + +On the backend side, which is only partially implemented and not yet +even fully designed, we currently expect to have a listener, to +dispatch client requests, two data stores, one for user profiles and +the other for actual datasets, and a set of auxiliary services, for +example to automate the filling of the metadata form and to +standardize some of its fields (see again +section~\ref{sec:metadata-form}). + +The entire system is hosted at the CNAF data center. + +\section{Technologies and protocols} + +As stated above, the design of the system is not yet complete and we +are still investigating different options to address the challenges we +face. + +Open aspects concern: + +\begin{itemize} + +\item the data model, which must accommodate both datasets (possibly + composed of multiple files), the corresponding metadata and a + mechanism to link them together; + +\item the authentication and authorization model, which should use as + much as possible standard web technologies and have flexible + mechanisms to authenticate users coming from different institutions, + leveraging their own Identity Providers; + +\item how to access the available storage from a client, both to + upload datasets and their metadata and subsequently access them.
+ +\end{itemize} + +The experimentation makes use of an installation of +NextCloud~\cite{ref:nextcloud}, an open-source suite of client-server +software for creating and using file hosting services, with +functionality often extended through the use of plugins. + +Authentication is based on OpenID Connect~\cite{ref:oidc} and makes +use of the INDIGO-IAM~\cite{ref:iam} service, an Identity and Access +Management product developed within the EU-funded +INDIGO-DataCloud~\cite{ref:indigo} project. INDIGO-IAM offers a +service to manage identities, user enrollment, group membership, +attributes and policies to access distributed resources and services +in a homogeneous and interoperable way; hence it represents a perfect +match to manage users, groups and resources of the CHNet +organization. In particular INDIGO-IAM delegates the authentication of +a user to their home institution identity provider under a trust +agreement. + +NextCloud offers also the possibility to access data via the WebDAV +protocol, allowing users to mount the remote storage on their local +machine and see it as if it were a local disk. This feature becomes +useful when interaction through a web browser is not the most +effective tool, for example for batch or bulk operations. + +\section{Metadata Form} +\label{sec:metadata-form} + +The Metadata form is a web application whose purpose is to associate +metadata with art works, measurement campaigns and analysis +results. The application, written in Typescript~\cite{ref:typescript} +and based on the Angular 2 framework~\cite{ref:angular2}, is under +development; the main deployment option foresees its integration into +the cloud platform, but the combination with +Electron~\cite{ref:electron} makes a desktop application a viable +alternative. 
+ +As shown in figure~\ref{fig:metadataSchema}, to fill the metadata form +a user can follow two paths: they can create a \textit{campaign} and +associate it with multiple \textit{sessions} and \textit{analyses} or +they can store information about a single \textit{analysis}. In +particular, each \textit{analysis} can be associated with one or more +\textit{datasets}, the studied \textit{object} (i.e.,~an art work) and +all the information about its \textit{type}, \textit{author}, +\textit{holder}, \textit{owner}, etc. In addition, users can provide +information about the analysis type, the operator who performed the +analysis, the devices, components and software used to scan, create or +read the resulting dataset. When completed, the resulting form, +translated into a JSON file, can be saved locally or uploaded to the +remote storage. + +\begin{figure}[ht] + \begin{center} + \includegraphics[scale=.4]{metadataSchema.png} + \end{center} + \caption{\label{fig:metadataSchema}Schema of the sections included + in the metadata description.} +\end{figure} + +\section{Application services} + +DHLab is also designed to provide visualization and analysis services +for some of the stored datasets. Currently a proof-of-concept +application is available, to visualize and perform some analysis of +images obtained from XRF scans~\cite{ref:xrf}. + +\section{Conclusions} + +DHLab is a project born from the need to group, share, catalogue and +reuse data that comes from measurements and analyses of cultural +heritage works. It aims at being flexible and usable by persons +covering different roles: physicists, computer scientists, cultural +heritage operators. The system is designed and deployed around a core +Cloud-based infrastructure, but some of its parts must be functioning +in offline situations. + +A web application for filling a form with metadata to be associated to +collected datasets according to an agreed-upon schema is being +developed. 
+ +Other web applications are foreseen for the visualization and analysis +of the stored datasets, starting from those coming from XRF, +radiocarbon and thermoluminescence analysis. + +\section*{References} + +\begin{thebibliography}{9} +\bibitem{ref:nextcloud} NextCloud \url{https://nextcloud.com/} + +\bibitem{ref:oidc} OpenId Connect \url{https://openid.net/connect} + +\bibitem{ref:iam} A Ceccanti, E Vianello, M Caberletti. (2018, + May). INDIGO Identity and Access Management + (IAM). Zenodo. \url{http://doi.org/10.5281/zenodo.1874790} + +\bibitem{ref:indigo} The INDIGO-DataCloud project + \url{https://www.indigo-datacloud.eu/} + +\bibitem{ref:typescript} Typescript language + \url{https://www.typescriptlang.org/} + +\bibitem{ref:angular2} Angular 2 framework + \url{https://angular.io/} + +\bibitem{ref:electron} Electron + \url{https://electronjs.org/} + +\bibitem{ref:xrf} Cappelli L, Giacomini F, Taccetti F, Castelli L, + dell'Agnello L. 2016. A web application to analyse XRF scanning data. INFN-CNAF +Annual Report. 
\url{https://www.cnaf.infn.it/annual-report} + +\end{thebibliography} + +\end{document} diff --git a/contributions/chnet/metadataSchema.png b/contributions/chnet/metadataSchema.png new file mode 100644 index 0000000000000000000000000000000000000000..feea118d2d1d08b796e387f3c43d8f8a2400b7a7 Binary files /dev/null and b/contributions/chnet/metadataSchema.png differ diff --git a/contributions/cms/report-cms-feb-2019.tex b/contributions/cms/report-cms-feb-2019.tex index dfe65d3abd98eb7ce6c03c4a49ab7b6813c86f13..25abfaf7fc9832377f2b142add7f959325f23755 100644 --- a/contributions/cms/report-cms-feb-2019.tex +++ b/contributions/cms/report-cms-feb-2019.tex @@ -1,93 +1,94 @@ -\documentclass[a4paper]{jpconf} -\usepackage{graphicx} -\begin{document} -\title{The CMS Experiment at the INFN CNAF Tier1} - -\author{Giuseppe Bagliesi} - -\address{INFN Sezione di Pisa, L.go B.Pontecorvo 3, 56127 Pisa, Italy} - -\ead{giuseppe.bagliesi@cern.ch} - -\begin{abstract} -A brief description of the CMS Computing operations during LHC RunII and their recent developments is given. The CMS utilization at Tier-1 CNAF is described -\end{abstract} - -\section{Introduction} -The CMS Experiment \cite{CMS-descr} at CERN collects and analyses data from the pp collisions in the LHC Collider. -The first physics Run, at centre of mass energy of 7-8 TeV, started in late March 2010, and ended in February 2013; more than 25~fb$^{-1}$ of collisions were collected during the Run. RunII, at 13 TeV, started in 2015, and finished at the end of 2018. - -During the first two years of RunII, LHC has been able to largely exceed its design parameters: already in 2016 instantaneous luminosity reached $1.5\times 10^{34}\mathrm{cm^{-2}s^{-1}}$, 50\% more than the planned “high luminosity†LHC phase. The most astonishing achievement, still, is a huge improvement on the fraction of time LHC can serve physics collision, increased form ~35\% of RunI to more than 80\% in some months on 2016. 
-The most visible effect, computing wise, is a large increase of data to be stored, processed and analysed offline, with 2016 allowing for the collection of more than 40 fb$^{-1}$ of physics data. - -In 2017 CMS recorded more than 46 fb$^{-1}$ of pp collisions, in addition to the data collected during 2016. These data were collected under considerably higher than expected pileup conditions forcing CMS to request a lumi-levelling to PU~55 for the first hours of the LHC fill; this has challenged both the computing system and CMS analysts with more complex events to process with respect to the modelling. From the computing operations side, higher pileup meant larger events and more time to process events than anticipated in the 2017 planning. As these data taking conditions affected only the second part of the year, the average 2017 pileup was in line with that used during the CMS resource planning. - -2018 was another excellent year for LHC operations and luminosity delivered to the experiments. CMS recorded 64 fb$^{-1}$ of pp collisions during 2018, in addition to the 84 fb$^{-1}$ collected during 2016 and 2017. This brings the total luminosity delivered in RunII to more than 150 fb$^{-1}$ , and the total RunI + RunII dataset to more than 190 fb$^{-1}$. - - - -\section{Run II computing operations} -During Run~II, the computing 2004 model designed for Run~I has greatly evolved. The MONARC Hierarchical division of sites in Tier0, Tier-1s and Tier-2s, is still present, but less relevant during operations. All simulation, analysis and processing workflows can now be executed at virtually any site, with a full transfer mesh allowing for point-to-point data movement, outside the rigid hierarchy. - -Remote access to data, using WAN-aware protocols like XrootD and data federations, are used more and more instead of planned data movement, allowing for an easier exploitation of CPU resources. 
-Opportunistic computing is becoming a key component, with CMS having explored access to HPC systems, Commercial Clouds, and with the capability of running its workflows on virtually any (sizeable) resource we have access to. - -In 2018 CMS deployed singularity \cite{singu} to all sites supporting the CMS VO. Singularity is a container solution which allows CMS to select the OS on a per job basis and decouples the OS of worker nodes from that required by experiments. Sites can setup worker nodes with a Singularity supported OS and CMS will choose the appropriate OS image for each job. - -CMS deployed a new version of the prompt reconstruction software on July 2018, during LHC MD2. This software is adapted to detector upgrades and data taking conditions, and the production level of alignment and calibration algorithms is reached. Data collected before this point has now been reprocessed for a fully consistent data set for analysis, in time for the Moriond 2019 conference. Production and distributed analysis activities continued at a very high level throughout 2018. The MC17 campaign, to be used for Winter and Summer 18 conferences, continued throughout the year, with decreasing utilization of resources; overall, more than 15B events were available by the Summer. The equivalent simulation campaign for 2018 data, MC18, started in October 2018 and is now almost completed. - -Developments to increase CMS throughput and disk usage efficiently continue. Of particular interest is the development of the NanoAOD data tier as a new alternative for analysis users. -The NanoAOD size per event is approximately 1 kB, 30-50 times smaller than the MiniAOD data tier and relies on only simple data types rather than the hierarchical data format structure in the CMS MiniAOD (and AOD) data tier. NanoAOD samples for the 2016, 2017 and 2018 data and corresponding Monte Carlo simulation have been produced, and are being used in many analyses. 
NanoAOD is now automatically produced in all the central production campaigns, and fast reprocessing campaigns from MiniAOD to NanoAOD have been tested and are able to achieve more than 4B events per day using only a fraction of CMS resources. - - -\section{CMS WLCG Resources and expected increase} -CMS Computing model has been used to request resources for 2018-19 RunII data taking and reprocessing, with total requests (Tier-0 + Tier-1s + Tier-2s) exceeding 2073 kHS06, 172 PB on disk, and 320 PB on tape. -However the actual pledged resources have been substantially lower than the requests due to budget restrictions from the funding agencies. To reduce the impact of this issue, CMS was able to achieve and deploy several technological advancements, including reducing the needed amount of AOD(SIM) on disk and to reduce the amount of simulated raw events on tape. In addition, some computing resource providers were able to provide more than their pledged level of resources to CMS during 2018. -Thanks to the optimizations and technological improvements described before it has been possible to tune accordingly the computing model of CMS. Year-by-year increases, which would have been large in presence of the reference computing model, have been reduced substantially. - -Italy contributes to CMS computing with 13\% of the Tier-1 and Tier-2 resources. The increase of CNAF pledges for 2019 have been reduced by a factor two with respect to the original request, due to INFN budget limitations, and the remaining increase has been postponed to 2021. -The 2019 pledges are therefore 78 kHS06 of CPU, 8020 TB of disk, and 26 PB for tape. - -CMS usage of CNAF is very intense and it represents one of the largest Tier-1 in CMS as number of processed hours, after the US Tier-1; the same holds for total number of processed jobs, as shown in Fig.~\ref{cms-jobs}. 
- - -\begin{figure} -\begin{center} -\includegraphics[width=0.8\textwidth,bb=0 0 900 900]{tier1-jobs-2018.pdf} -\end{center} -\caption{\label{cms-jobs}Jobs processed at CMS Tier1s during 2018} -\end{figure} - - - -\section{The CNAF flood incident} -On November 9th 2017 a major incident happened when the CNAF computer center was flooded. -This caused an interruption of all CNAF services and the damage of many disk arrays and servers, as well as of the tape library. About 40 damaged tapes (out of a total of 150) belonged to CMS. They contained unique copy of MC and RECO data. Six tapes contained a 2nd custodial copy of RAW data. -A special recovery procedure was adopted by CNAF team through a specialized company and no data have been permanently lost. - -The impact of this incident for CMS, although serious, was mitigated thanks to the intrinsic redundancy of our distributed computing model. Other Tier1s increased temporary their share to compensate the CPU loss, deploying the 2018 pledges as soon as possible. -A full recovery of CMS services of CNAF was achieved by beginning of March 2018. - -It is important to point out that, despite the incident affecting the first months of 2018, the integrated site readiness of CNAF in 2018 was very good and at the same level or better than the other CMS Tier1s, see Fig.~\ref{tier1-cms-sr}. - -\begin{figure} -\begin{center} -\includegraphics[width=0.8\textwidth,bb=0 0 900 900]{tier1-readiness-2018.pdf} -\end{center} -\caption{\label{tier1-cms-sr}Site readiness of CMS Tier1s in 2018} -\end{figure} - - -\section{Conclusions} -CNAF is an important asset for the CMS Collaboration, being the second Tier1 in terms of resource utilization, pledges and availability. -The unfortunate incident of the end of 2017 has been managed professionally and efficiently by the CNAF staff, guaranteeing the fastest possible recovery with minimal data losses at the beginning of 2018. 
- - -\section*{References} -\begin{thebibliography}{9} -\bibitem{CMS-descr}CMS Collaboration, The CMS experiment at the CERN LHC, JINST 3 (2008) S08004, -doi:10.1088/1748-0221/3/08/S08004. -\bibitem{singu} http://singularity.lbl.gov/ -\end{thebibliography} - -\end{document} +\documentclass[a4paper]{jpconf} +\usepackage{graphicx} +\begin{document} +\title{The CMS Experiment at the INFN CNAF Tier 1} + +\author{Giuseppe Bagliesi$^1$} + +\address{$^1$ INFN Sezione di Pisa, Pisa, IT} + +\ead{giuseppe.bagliesi@cern.ch} + +\begin{abstract} +A brief description of the CMS Computing operations during LHC RunII and their recent developments is given. The CMS utilization at Tier 1 CNAF is described. +\end{abstract} + +\section{Introduction} +The CMS Experiment \cite{CMS-descr} at CERN collects and analyses data from the pp collisions in the LHC Collider. +The first physics Run, at center of mass energy of 7-8 TeV, started in late March 2010, and ended in February 2013; more than 25~fb$^{-1}$ of collisions were collected during the Run. RunII, at 13 TeV, started in 2015, and finished at the end of 2018. + +During the first two years of RunII, LHC has been able to largely exceed its design parameters: already in 2016 instantaneous luminosity reached $1.5\times 10^{34}\mathrm{cm^{-2}s^{-1}}$, 50\% more than the planned ``high luminosity'' LHC phase. The most astonishing achievement, still, is a huge improvement on the fraction of time LHC can serve physics collisions, increased from ~35\% of RunI to more than 80\% in some months of 2016. +The most visible effect, computing wise, is a large increase of data to be stored, processed and analysed offline, with 2016 allowing for the collection of more than 40 fb$^{-1}$ of physics data. + +In 2017 CMS recorded more than 46 fb$^{-1}$ of pp collisions, in addition to the data collected during 2016.
These data were collected under considerably higher than expected pileup conditions forcing CMS to request a lumi-levelling to PU~55 for the first hours of the LHC fill; this has challenged both the computing system and CMS analysts with more complex events to process with respect to the modelling. From the computing operations side, higher pileup meant larger events and more time to process events than anticipated in the 2017 planning. As these data taking conditions affected only the second part of the year, the average 2017 pileup was in line with that used during the CMS resource planning. + +2018 was another excellent year for LHC operations and luminosity delivered to the experiments. CMS recorded 64 fb$^{-1}$ of pp collisions during 2018, in addition to the 84 fb$^{-1}$ collected during 2016 and 2017. This brings the total luminosity delivered in RunII to more than 150 fb$^{-1}$, and the total RunI + RunII dataset to more than 190 fb$^{-1}$. + + + +\section{Run II computing operations} +During Run~II, the 2004 computing model designed for Run~I has greatly evolved. The MONARC hierarchical division of sites in Tier 0, Tier 1s and Tier 2s, is still present, but less relevant during operations. All simulation, analysis and processing workflows can now be executed at virtually any site, with a full transfer mesh allowing for point-to-point data movement, outside the rigid hierarchy. + +Remote access to data, using WAN-aware protocols like XrootD and data federations, is used more and more instead of planned data movement, allowing for an easier exploitation of CPU resources. +Opportunistic computing is becoming a key component, with CMS having explored access to HPC systems, Commercial Clouds, and with the capability of running its workflows on virtually any (sizeable) resource we have access to. + +In 2018 CMS deployed singularity \cite{singu} to all sites supporting the CMS VO. 
Singularity is a container solution which allows CMS to select the OS on a per job basis and decouples the OS of worker nodes from that required by experiments. Sites can set up worker nodes with a Singularity supported OS and CMS will choose the appropriate OS image for each job. + +CMS deployed a new version of the prompt reconstruction software in July 2018, during LHC MD2. This software is adapted to detector upgrades and data taking conditions, and the production level of alignment and calibration algorithms is reached. Data collected before this point has now been reprocessed for a fully consistent data set for analysis, in time for the Moriond 2019 conference. Production and distributed analysis activities continued at a very high level throughout 2018. The MC17 campaign, to be used for Winter and Summer 18 conferences, continued throughout the year, with decreasing utilization of resources; overall, more than 15B events were available by the Summer. The equivalent simulation campaign for 2018 data, MC18, started in October 2018 and is now almost completed. + +Developments to increase CMS throughput and disk usage efficiency continue. Of particular interest is the development of the NanoAOD data tier as a new alternative for analysis users. +The NanoAOD size per event is approximately 1 kB, 30-50 times smaller than the MiniAOD data tier and relies on only simple data types rather than the hierarchical data format structure in the CMS MiniAOD (and AOD) data tier. NanoAOD samples for the 2016, 2017 and 2018 data and corresponding Monte Carlo simulation have been produced, and are being used in many analyses. NanoAOD is now automatically produced in all the central production campaigns, and fast reprocessing campaigns from MiniAOD to NanoAOD have been tested and are able to achieve more than 4B events per day using only a fraction of CMS resources. 
+ + +\section{CMS WLCG Resources and expected increase} +The CMS Computing model has been used to request resources for 2018-19 RunII data taking and reprocessing, with total requests (Tier 0 + Tier 1s + Tier 2s) exceeding 2073 kHS06, 172 PB on disk, and 320 PB on tape. +However the actual pledged resources have been substantially lower than the requests due to budget restrictions from the funding agencies. To reduce the impact of this issue, CMS was able to achieve and deploy several technological advancements, including reducing the needed amount of AOD(SIM) on disk and to reduce the amount of simulated raw events on tape. In addition, some computing resource providers were able to provide more than their pledged level of resources to CMS during 2018. +Thanks to the optimizations and technological improvements described before it has been possible to tune the computing model of CMS accordingly. Year-by-year increases, which would have been large in presence of the reference computing model, have been reduced substantially. + +Italy contributes to CMS computing with 13\% of the Tier 1 and Tier 2 resources. The increase of CNAF pledges for 2019 has been reduced by a factor of two with respect to the original request, due to INFN budget limitations, and the remaining increase has been postponed to 2021. +The 2019 pledges are therefore 78 kHS06 of CPU, 8020 TB of disk, and 26 PB for tape. + +CMS usage of CNAF is very intense and it represents one of the largest Tier 1 sites in CMS in terms of number of processed hours, after the US Tier 1; the same holds for total number of processed jobs, as shown in Fig.~\ref{cms-jobs}. + + +\begin{figure} +\begin{center} +\includegraphics[width=0.8\textwidth,bb=0 0 900 900]{tier1-jobs-2018.pdf} +\end{center} +\caption{\label{cms-jobs}Jobs processed at CMS Tier 1 sites during 2018} +\end{figure} + + + +\section{The CNAF flood incident} +On November 9th 2017 a major incident happened when the CNAF computer center was flooded. 
+This caused an interruption of all CNAF services and the damage of many disk arrays and servers, as well as of the tape library. About 40 damaged tapes (out of a total of 150) belonged to CMS. They contained a unique copy of MC and RECO data. Six tapes contained a 2nd custodial copy of RAW data. +A special recovery procedure was adopted by the CNAF team through a specialized company and no data have been permanently lost. + +The impact of this incident for CMS, although serious, was mitigated thanks to the intrinsic redundancy of our distributed computing model. Other Tier 1 sites temporarily increased their share to compensate for the CPU loss, deploying the 2018 pledges as soon as possible. +A full recovery of CMS services of CNAF was achieved by the beginning of March 2018. + +It is important to point out that, despite the incident affecting the first months of 2018, the integrated site readiness of CNAF in 2018 was very good and at the same level or better than the other CMS Tier 1 sites, see Fig.~\ref{tier1-cms-sr}. + +\begin{figure} +\begin{center} +\includegraphics[width=0.8\textwidth,bb=0 0 900 900]{tier1-readiness-2018.pdf} +\end{center} +\caption{\label{tier1-cms-sr}Site readiness of CMS Tier 1s in 2018} +\end{figure} + + +\section{Conclusions} +CNAF is an important asset for the CMS Collaboration, being the second Tier 1 in terms of resource utilization, pledges and availability. +The unfortunate incident of the end of 2017 has been managed professionally and efficiently by the CNAF staff, guaranteeing the fastest possible recovery with minimal data losses at the beginning of 2018. + + +\section*{References} +\begin{thebibliography}{9} +\bibitem{CMS-descr}CMS Collaboration, The CMS experiment at the CERN LHC, JINST 3 (2008) S08004, +doi:10.1088/1748-0221/3/08/S08004. 
+\bibitem{singu} http://singularity.lbl.gov/ +\end{thebibliography} + +\end{document} diff --git a/contributions/cnprov/cnprov.tex b/contributions/cnprov/cnprov.tex index cfb780a2648b13cb8f1b2c327f7707dd0b28ed18..f16b514b200899ba587d615e99261b20963d3359 100644 --- a/contributions/cnprov/cnprov.tex +++ b/contributions/cnprov/cnprov.tex @@ -7,14 +7,14 @@ \title{CNAF Provisioning system: Puppet 5 upgrade} \author{ - Stefano Bovina$^1$, - Diego Michelotto$^1$, - Enrico Fattibene$^1$, - Antonio Falabella$^1$, - Andrea Chierici$^1$ + S. Bovina$^1$, + D. Michelotto$^1$, + E. Fattibene$^1$, + A. Falabella$^1$, + A. Chierici$^1$ } -\address{$^1$ INFN CNAF, Viale Berti Pichat 6/2, 40126, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{ stefano.bovina@cnaf.infn.it, @@ -32,7 +32,7 @@ In this report we are going to describe activities that have been carried on in \end{abstract} \section{Provisioning at CNAF} -The installation and configuration activity, in a big computing centre like CNAF, must take into account the size of the resources +The installation and configuration activity, in a big computing center like CNAF, must take into account the size of the resources (roughly a thousand nodes to manage), the heterogeneity of the systems (virtual vs physical nodes, computing nodes and different type of servers) and the different working group in charge for their management. To meet this challenge CNAF implemented a unique solution, adopted by all the departments, diff --git a/contributions/cta/CTA_annualreport_2018_v1.tex b/contributions/cta/CTA_annualreport_2018_v1.tex index 55fc917d1098afaacc26cdd3039961c07078cc20..49c1559eddfd07f542572ac89a98e979dbdc95de 100644 --- a/contributions/cta/CTA_annualreport_2018_v1.tex +++ b/contributions/cta/CTA_annualreport_2018_v1.tex @@ -5,13 +5,12 @@ \begin{document} \title{The Cherenkov Telescope Array} -\author{L. Arrabito$^1$, C. Bigongiari$^2$, F. Di Pierro$^3$ and P. 
Vallania$^{3,4}$} +\author{L. Arrabito$^1$, C. Bigongiari$^2$, F. Di Pierro$^3$, P. Vallania$^{3,4}$} -\address{$^1$ Laboratoire Univers et Particules de Montpellier, Universit\'e de Montpellier II Place Eug\`ene Bataillon - CC 72, CNRS/IN2P3, - F-34095 Montpellier, France} -\address{$^2$ INAF Osservatorio Astronomico di Roma - Via Frascati 33, 00040, Monte Porzio Catone (RM), Italy} -\address{$^3$ INFN Sezione di Torino - Via Pietro Giuria 1, 10125, Torino (TO), Italy} -\address{$^4$ INAF Osservatorio Astrofisico di Torino - Via Pietro Giuria 1, 10125, Torino (TO), Italy} +\address{$^1$ Laboratoire Univers et Particules de Montpellier et Universit\'e de Montpellier II, Montpellier, FR} +\address{$^2$ INAF Osservatorio Astronomico di Roma, Monte Porzio Catone (RM), IT} +\address{$^3$ INFN Sezione di Torino, Torino, IT} +\address{$^4$ INAF Osservatorio Astrofisico di Torino, Torino, IT} \ead{arrabito@in2p3.fr, ciro.bigongiari@oa-roma.inaf.it, federico.dipierro@to.infn.it, piero.vallania@to.infn.it} diff --git a/contributions/dampe/main.tex b/contributions/dampe/main.tex index 8cbdef7e3504a7055897f08faa619a6ba9f50503..22fa0abd85cb1eae50a58a7ff98a5d7fcb6c0ee9 100644 --- a/contributions/dampe/main.tex +++ b/contributions/dampe/main.tex @@ -9,16 +9,16 @@ \author{G. Ambrosi$^1$, G. Donvito$^5$, D.F.Droz$^6$, M. Duranti$^1$, D. D'Urso$^{2,3,4}$, F. Gargano$^{5,\ast}$, G. 
Torralba Elipe$^{7,8}$} -\address{$^1$ INFN, Sezione di Perugia, I-06100 Perugia, Italy} -\address{$^2$ Universit\`a di Sassari, I-07100 Sassari, Italy} -\address{$^3$ ASDC, I-00133 Roma, Italy} -\address{$^4$ INFN-LNS, I-95123 Catania, Italy} +\address{$^1$ INFN Sezione di Perugia, Perugia, IT} +\address{$^2$ Universit\`a di Sassari, Sassari, IT} +\address{$^3$ ASDC, Roma, IT} +\address{$^4$ INFN - Laboratori Nazionali del Sud, Catania, IT} %\address{$^3$ Universit\`a di Perugia, I-06100 Perugia, Italy} -\address{$^5$ INFN, Sezione di Bari, I-70125 Bari, Italy} -\address{$^6$ University of Geneva, Departement de physique nucléaire et corpusculaire (DPNC), CH-1211, Gen\`eve 4, Switzerland} +\address{$^5$ INFN Sezione di Bari, Bari, IT} +\address{$^6$ University of Geneva, Gen\`eve, CH} -\address{$^7$ Gran Sasso Science Institute, L'Aquila, Italy} -\address{$^8$ INFN - Laboratori Nazionali del Gran Sasso, L'Aquila, Italy} +\address{$^7$ Gran Sasso Science Institute, L'Aquila, IT} +\address{$^8$ INFN - Laboratori Nazionali del Gran Sasso, L'Aquila, IT} \address{DAMPE experiment \url{http://dpnc.unige.ch/dampe/}, @@ -162,4 +162,4 @@ The DAMPE mission was founded by the strategic priority science and technology p \end{thebibliography} -\end{document} \ No newline at end of file +\end{document} diff --git a/contributions/dmsq/dmsq2018.tex b/contributions/dmsq/dmsq2018.tex index 44a7bd1b74f80f361926dbb274231ac99203056a..ca7c5880f3fb81d8f3c04e731b94f400644559e3 100644 --- a/contributions/dmsq/dmsq2018.tex +++ b/contributions/dmsq/dmsq2018.tex @@ -7,9 +7,9 @@ \begin{document} \title{Comparing Data Mining Techniques for Software Defect Prediction} -\author{Marco Canaparo, Elisabetta Ronchieri} +\author{M. Canaparo$^1$, E. 
Ronchieri$^1$} -\address{INFN CNAF, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{marco.canaparo@cnaf.infn.it, elisabetta.ronchieri@cnaf.infn.it} diff --git a/contributions/ds_devops_pe/Artifact/ds_devops_pe.pdf b/contributions/ds_devops_pe/Artifact/ds_devops_pe.pdf new file mode 100644 index 0000000000000000000000000000000000000000..07f6fd5e76aa9a20e73cc935b8acb65ca3bbf60c Binary files /dev/null and b/contributions/ds_devops_pe/Artifact/ds_devops_pe.pdf differ diff --git a/contributions/ds_devops_pe/Artifact/iopams.sty b/contributions/ds_devops_pe/Artifact/iopams.sty new file mode 100644 index 0000000000000000000000000000000000000000..044dde929745d48d13601b572a0f586728ebf0a4 --- /dev/null +++ b/contributions/ds_devops_pe/Artifact/iopams.sty @@ -0,0 +1,87 @@ +%% +%% This is file `iopams.sty' +%% File to include AMS fonts and extra definitions for bold greek +%% characters for use with iopart.cls +%% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{iopams}[1997/02/13 v1.0] +\RequirePackage{amsgen}[1995/01/01] +\RequirePackage{amsfonts}[1995/01/01] +\RequirePackage{amssymb}[1995/01/01] +\RequirePackage{amsbsy}[1995/01/01] +% +\iopamstrue % \newif\ifiopams in iopart.cls & iopbk2e.cls +% % allows optional text to be in author guidelines +% +% Bold lower case Greek letters +% +\newcommand{\balpha}{\boldsymbol{\alpha}} +\newcommand{\bbeta}{\boldsymbol{\beta}} +\newcommand{\bgamma}{\boldsymbol{\gamma}} +\newcommand{\bdelta}{\boldsymbol{\delta}} +\newcommand{\bepsilon}{\boldsymbol{\epsilon}} +\newcommand{\bzeta}{\boldsymbol{\zeta}} +\newcommand{\bfeta}{\boldsymbol{\eta}} +\newcommand{\btheta}{\boldsymbol{\theta}} +\newcommand{\biota}{\boldsymbol{\iota}} +\newcommand{\bkappa}{\boldsymbol{\kappa}} +\newcommand{\blambda}{\boldsymbol{\lambda}} +\newcommand{\bmu}{\boldsymbol{\mu}} +\newcommand{\bnu}{\boldsymbol{\nu}} +\newcommand{\bxi}{\boldsymbol{\xi}} +\newcommand{\bpi}{\boldsymbol{\pi}} +\newcommand{\brho}{\boldsymbol{\rho}} 
+\newcommand{\bsigma}{\boldsymbol{\sigma}} +\newcommand{\btau}{\boldsymbol{\tau}} +\newcommand{\bupsilon}{\boldsymbol{\upsilon}} +\newcommand{\bphi}{\boldsymbol{\phi}} +\newcommand{\bchi}{\boldsymbol{\chi}} +\newcommand{\bpsi}{\boldsymbol{\psi}} +\newcommand{\bomega}{\boldsymbol{\omega}} +\newcommand{\bvarepsilon}{\boldsymbol{\varepsilon}} +\newcommand{\bvartheta}{\boldsymbol{\vartheta}} +\newcommand{\bvaromega}{\boldsymbol{\varomega}} +\newcommand{\bvarrho}{\boldsymbol{\varrho}} +\newcommand{\bvarzeta}{\boldsymbol{\varsigma}} %NB really sigma +\newcommand{\bvarsigma}{\boldsymbol{\varsigma}} +\newcommand{\bvarphi}{\boldsymbol{\varphi}} +% +% Bold upright capital Greek letters +% +\newcommand{\bGamma}{\boldsymbol{\Gamma}} +\newcommand{\bDelta}{\boldsymbol{\Delta}} +\newcommand{\bTheta}{\boldsymbol{\Theta}} +\newcommand{\bLambda}{\boldsymbol{\Lambda}} +\newcommand{\bXi}{\boldsymbol{\Xi}} +\newcommand{\bPi}{\boldsymbol{\Pi}} +\newcommand{\bSigma}{\boldsymbol{\Sigma}} +\newcommand{\bUpsilon}{\boldsymbol{\Upsilon}} +\newcommand{\bPhi}{\boldsymbol{\Phi}} +\newcommand{\bPsi}{\boldsymbol{\Psi}} +\newcommand{\bOmega}{\boldsymbol{\Omega}} +% +% Bold versions of miscellaneous symbols +% +\newcommand{\bpartial}{\boldsymbol{\partial}} +\newcommand{\bell}{\boldsymbol{\ell}} +\newcommand{\bimath}{\boldsymbol{\imath}} +\newcommand{\bjmath}{\boldsymbol{\jmath}} +\newcommand{\binfty}{\boldsymbol{\infty}} +\newcommand{\bnabla}{\boldsymbol{\nabla}} +\newcommand{\bdot}{\boldsymbol{\cdot}} +% +% Symbols for caption +% +\renewcommand{\opensquare}{\mbox{$\square$}} +\renewcommand{\opentriangle}{\mbox{$\vartriangle$}} +\renewcommand{\opentriangledown}{\mbox{$\triangledown$}} +\renewcommand{\opendiamond}{\mbox{$\lozenge$}} +\renewcommand{\fullsquare}{\mbox{$\blacksquare$}} +\newcommand{\fulldiamond}{\mbox{$\blacklozenge$}} +\newcommand{\fullstar}{\mbox{$\bigstar$}} +\newcommand{\fulltriangle}{\mbox{$\blacktriangle$}} +\newcommand{\fulltriangledown}{\mbox{$\blacktriangledown$}} + +\endinput 
+%% +%% End of file `iopams.sty'. diff --git a/contributions/ds_devops_pe/Artifact/jpconf.cls b/contributions/ds_devops_pe/Artifact/jpconf.cls new file mode 100644 index 0000000000000000000000000000000000000000..09f509fdcfde0543cfbc37e4f64c02e11d9b4972 --- /dev/null +++ b/contributions/ds_devops_pe/Artifact/jpconf.cls @@ -0,0 +1,957 @@ +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{jpconf} + [2007/03/07 v1.1 + LaTeX class for Journal of Physics: Conference Series] +%\RequirePackage{graphicx} +\newcommand\@ptsize{1} +\newif\if@restonecol +\newif\if@letterpaper +\newif\if@titlepage +\newif\ifiopams +\@titlepagefalse +\@letterpaperfalse +\DeclareOption{a4paper} + {\setlength\paperheight {297mm}% + \setlength\paperwidth {210mm}% +\@letterpaperfalse} +\DeclareOption{letterpaper} + {\setlength\paperheight {279.4mm}% + \setlength\paperwidth {215.9mm}% +\@letterpapertrue} +\DeclareOption{landscape} + {\setlength\@tempdima {\paperheight}% + \setlength\paperheight {\paperwidth}% + \setlength\paperwidth {\@tempdima}} +\DeclareOption{twoside}{\@twosidetrue \@mparswitchtrue} +\renewcommand\@ptsize{1} +%\ExecuteOptions{A4paper, twoside} +\ExecuteOptions{A4paper} +\ProcessOptions +\DeclareMathAlphabet{\bi}{OML}{cmm}{b}{it} +\DeclareMathAlphabet{\bcal}{OMS}{cmsy}{b}{n} +\input{jpconf1\@ptsize.clo} +\setlength\lineskip{1\p@} +\setlength\normallineskip{1\p@} +\renewcommand\baselinestretch{} +\setlength\parskip{0\p@ \@plus \p@} +\@lowpenalty 51 +\@medpenalty 151 +\@highpenalty 301 +\setlength\parindent{5mm} +\setcounter{topnumber}{8} +\renewcommand\topfraction{1} +\setcounter{bottomnumber}{3} +\renewcommand\bottomfraction{.99} +\setcounter{totalnumber}{8} +\renewcommand\textfraction{0.01} +\renewcommand\floatpagefraction{.8} +\setcounter{dbltopnumber}{6} +\renewcommand\dbltopfraction{1} +\renewcommand\dblfloatpagefraction{.8} +\renewcommand{\title}{\@ifnextchar[{\@stitle}{\@ftitle}} +\pretolerance=5000 +\tolerance=8000 +% Headings for all pages apart from first +% 
+\def\ps@headings{% + \let\@oddfoot\@empty + \let\@evenfoot\@empty + \let\@oddhead\@empty + \let\@evenhead\@empty + %\def\@evenhead{\thepage\hfil\itshape\rightmark}% + %\def\@oddhead{{\itshape\leftmark}\hfil\thepage}% + %\def\@evenhead{{\itshape Journal of Physics: Conference Series}\hfill}% + %\def\@oddhead{\hfill {\itshape Journal of Physics: Conference Series}}%% + \let\@mkboth\markboth + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +% Headings for first page +% +\def\ps@myheadings{\let\@oddfoot\@empty\let\@evenfoot\@empty + \let\@oddhead\@empty\let\@evenhead\@empty + \let\@mkboth\@gobbletwo + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +\def\@stitle[#1]#2{\markboth{#1}{#1}% + %\pagestyle{empty}% + \thispagestyle{myheadings} + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large +\fontsize{18bp}{24bp}\selectfont\bf\raggedright\noindent#2\par}} +\def\@ftitle#1{\markboth{#1}{#1}% + \thispagestyle{myheadings} +%\pagestyle{empty}% + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large\raggedright\noindent\bf#1\par} +\fontsize{18bp}{24bp}\selectfont\bf\noindent\raggedright#1\par}} +%AUTHOR +\renewcommand{\author}{\@ifnextchar[{\@sauthor}{\@fauthor}} +\def\@sauthor[#1]#2{\markright{#1} % for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#2 + \end{indented}% + \smallskip} +\def\@fauthor#1{%\markright{#1} for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#1 + \end{indented}% + \smallskip} +%E-MAIL +\def\eads#1{\vspace*{5pt}\address{E-mail: #1}} +\def\ead#1{\vspace*{5pt}\address{E-mail: \mailto{#1}}} +\def\mailto#1{{\tt #1}} +%ADDRESS +\newcommand{\address}[1]{\begin{indented} + \item[]\rm\raggedright #1 + \end{indented}} +\newlength{\indentedwidth} +\newdimen\mathindent +\mathindent = 6pc +\indentedwidth=\mathindent +% FOOTNOTES +%\renewcommand\footnoterule{% +% \kern-3\p@ +% \hrule\@width.4\columnwidth +% \kern2.6\p@} 
+%\newcommand\@makefntext[1]{% +% \parindent 1em% +% \noindent +% \hb@xt@1.8em{\hss\@makefnmark}#1} +% Footnotes: symbols selected in same order as address indicators +% unless optional argument of [<num>] use to specify required symbol, +% 1=\dag, 2=\ddag, etc +% Usage: \footnote{Text of footnote} +% \footnote[3]{Text of footnote} +% +\def\footnoterule{}% +\setcounter{footnote}{0} +\long\def\@makefntext#1{\parindent 1em\noindent + \makebox[1em][l]{\footnotesize\rm$\m@th{\fnsymbol{footnote}}$}% + \footnotesize\rm #1} +\def\@makefnmark{\normalfnmark} +\def\normalfnmark{\hbox{${\fnsymbol{footnote}}\m@th$}} +\def\altfnmark{\hbox{$^{\rm Note}\ {\fnsymbol{footnote}}\m@th$}} +\def\footNote#1{\let\@makefnmark\altfnmark\footnote{#1}\let\@makefnmark\normalfnmark} +\def\@thefnmark{\fnsymbol{footnote}} +\def\footnote{\protect\pfootnote} +\def\pfootnote{\@ifnextchar[{\@xfootnote}{\stepcounter{\@mpfn}% + \begingroup\let\protect\noexpand + \xdef\@thefnmark{\thempfn}\endgroup + \@footnotemark\@footnotetext}} +\def\@xfootnote[#1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} + +\newcommand\ftnote{\protect\pftnote} +\newcommand\pftnote[1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} +\newcommand{\fnm}[1]{\setcounter{footnote}{#1}\footnotetext} + +\def\@fnsymbol#1{\ifnum\thefootnote=99\hbox{*}\else^{\thefootnote}\fi\relax} +% +% Address marker +% +\newcommand{\ad}[1]{\noindent\hbox{$^{#1}$}\relax} +\newcommand{\adnote}[2]{\noindent\hbox{$^{#1,}$}\setcounter{footnote}{#2}% + \addtocounter{footnote}{-1}\footnote} +\def\@tnote{} +\newcounter{oldftnote} +\newcommand{\tnote}[1]{*\gdef\@tnote{% + \setcounter{oldftnote}{\c@footnote}% + \setcounter{footnote}{99}% + \footnotetext{#1}% + \setcounter{footnote}{\c@oldftnote}\addtocounter{footnote}{-1}}} +%================== +% Acknowledgments (no heading if letter) +% Usage \ack for Acknowledgments, \ackn for Acknowledgement +\def\ack{\section*{Acknowledgments}} +\def\ackn{\section*{Acknowledgment}} 
+%SECTION DEFINITIONS +\setcounter{secnumdepth}{3} +\newcounter {section} +\newcounter {subsection}[section] +\newcounter {subsubsection}[subsection] +\newcounter {paragraph}[subsubsection] +\newcounter {subparagraph}[paragraph] +\renewcommand \thesection {\arabic{section}} +\renewcommand\thesubsection {\thesection.\arabic{subsection}} +\renewcommand\thesubsubsection{\thesubsection .\arabic{subsubsection}} +\renewcommand\theparagraph {\thesubsubsection.\arabic{paragraph}} +\renewcommand\thesubparagraph {\theparagraph.\arabic{subparagraph}} +%\nosections +\def\nosections{\vspace{30\p@ plus12\p@ minus12\p@} + \noindent\ignorespaces} + +%\renewcommand{\@startsection}[6] +%{% +%\if@noskipsec \leavevmode \fi +%\par +% \@tempskipa #4\relax +%%\@tempskipa 0pt\relax +% \@afterindenttrue +% \ifdim \@tempskipa <\z@ +% \@tempskipa -\@tempskipa \@afterindentfalse +% \fi +% \if@nobreak +% \everypar{}% +% \else +% \addpenalty\@secpenalty\addvspace\@tempskipa +% \fi +% \@ifstar +% {\@ssect{#3}{#4}{#5}{#6}}% +% {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} +%\renewcommand{\@sect}[8]{% +% \ifnum #2>\c@secnumdepth +% \let\@svsec\@empty +% \else +% \refstepcounter{#1}% +% \protected@edef\@svsec{\@seccntformat{#1}\relax}% +% \fi +% \@tempskipa #5\relax +% \ifdim \@tempskipa>\z@ +% \begingroup +% #6{% +% \@hangfrom{\hskip #3\relax\@svsec}% +% \interlinepenalty \@M #8\@@par}% +% \endgroup +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}% +% \else +% \def\@svsechd{% +% #6{\hskip #3\relax +% \@svsec #8}% +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}}% +% \fi +% \@xsect{#5}} +%\renewcommand{\@xsect}[1]{% +% \@tempskipa #1\relax +% \ifdim \@tempskipa>\z@ +% \par \nobreak +% \vskip \@tempskipa +% \@afterheading +% \else +% \@nobreakfalse +% \global\@noskipsectrue +% 
\everypar{% +% \if@noskipsec +% \global\@noskipsecfalse +% {\setbox\z@\lastbox}% +% \clubpenalty\@M +% \begingroup \@svsechd \endgroup +% \unskip +% \@tempskipa #1\relax +% \hskip -\@tempskipa +% \else +% \clubpenalty \@clubpenalty +% \everypar{}% +% \fi}% +% \fi +% \ignorespaces} +%======================================================================== +\newcommand\section{\@startsection {section}{1}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\bfseries\raggedright}} +\newcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\itshape\raggedright}} +\newcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {-1em \@plus .2em}% + {\reset@font\normalsize\itshape}} +\newcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {3.25ex \@plus1ex \@minus.2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\newcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}% + {3.25ex \@plus1ex \@minus .2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\def\@sect#1#2#3#4#5#6[#7]#8{\ifnum #2>\c@secnumdepth + \let\@svsec\@empty\else + \refstepcounter{#1}\edef\@svsec{\csname the#1\endcsname. 
}\fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6\relax + \noindent{\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\par}% + \endgroup + \csname #1mark\endcsname{#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}\else + \def\@svsechd{#6\hskip #3\relax %% \relax added 2 May 90 + \@svsec #8\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}}\fi + \@xsect{#5}} +% +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + \begingroup #4\noindent{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + \else \def\@svsechd{#4\hskip #1\relax #5}\fi + \@xsect{#3}} +% LIST DEFINITIONS +\setlength\leftmargini {2em} +\leftmargin \leftmargini +\setlength\leftmarginii {2em} +\setlength\leftmarginiii {1.8em} +\setlength\leftmarginiv {1.6em} + \setlength\leftmarginv {1em} + \setlength\leftmarginvi {1em} +\setlength\leftmargin{\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\roman{enumi}} +\renewcommand\theenumii{\alph{enumii}} +\renewcommand\theenumiii{\arabic{enumiii}} +\renewcommand\theenumiv{\Alph{enumiv}} +\newcommand\labelenumi{(\theenumi)} +\newcommand\labelenumii{(\theenumii)} +\newcommand\labelenumiii{\theenumiii.} +\newcommand\labelenumiv{(\theenumiv)} +\renewcommand\p@enumii{(\theenumi)} +\renewcommand\p@enumiii{(\theenumi.\theenumii)} +\renewcommand\p@enumiv{(\theenumi.\theenumii.\theenumiii)} +\newcommand\labelitemi{$\m@th\bullet$} +\newcommand\labelitemii{\normalfont\bfseries --} +\newcommand\labelitemiii{$\m@th\ast$} +\newcommand\labelitemiv{$\m@th\cdot$} +\renewcommand \theequation {\@arabic\c@equation} + +%%%%%%%%%%%%% Figures +\newcounter{figure} +\renewcommand\thefigure{\@arabic\c@figure} 
+\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +\def\fnum@figure{\figurename~\thefigure} +\newenvironment{figure}{\footnotesize\rm\@float{figure}}% + {\end@float\normalsize\rm} +\newenvironment{figure*}{\footnotesize\rm\@dblfloat{figure}}{\end@dblfloat} +\newcounter{table} +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\tablename~\thetable} +\newenvironment{table}{\footnotesize\rm\@float{table}}% + {\end@float\normalsize\rm} +\newenvironment{table*}{\footnotesize\rm\@dblfloat{table}}% + {\end@dblfloat\normalsize\rm} +\newlength\abovecaptionskip +\newlength\belowcaptionskip +\setlength\abovecaptionskip{10\p@} +\setlength\belowcaptionskip{0\p@} +%Table Environments +%\newenvironment{tableref}[3][\textwidth]{% +%\begin{center}% +%\begin{table}% +%\captionsetup[table]{width=#1} +%\centering\caption{\label{#2}#3}}{\end{table}\end{center}} +%%%%%%%%%%%%%%%%% +%\newcounter{figure} +%\renewcommand \thefigure {\@arabic\c@figure} +%\def\fps@figure{tbp} +%\def\ftype@figure{1} +%\def\ext@figure{lof} +%\def\fnum@figure{\figurename~\thefigure} +%ENVIRONMENT: figure +%\newenvironment{figure} +% {\@float{figure}} +% {\end@float} +%ENVIRONMENT: figure* +%\newenvironment{figure*} +% {\@dblfloat{figure}} +% {\end@dblfloat} +%ENVIRONMENT: table +%\newcounter{table} +%\renewcommand\thetable{\@arabic\c@table} +%\def\fps@table{tbp} +%\def\ftype@table{2} +%\def\ext@table{lot} +%\def\fnum@table{\tablename~\thetable} +%\newenvironment{table} +% {\@float{table}} +% {\end@float} +%ENVIRONMENT: table* +%\newenvironment{table*} +% {\@dblfloat{table}} +% {\end@dblfloat} +%\newlength\abovecaptionskip +%\newlength\belowcaptionskip +%\setlength\abovecaptionskip{10\p@} +%\setlength\belowcaptionskip{0\p@} +% CAPTIONS +% Added redefinition of \@caption so captions are not written to +% aux file therefore less need to \protect fragile commands +% +\long\def\@caption#1[#2]#3{\par\begingroup + \@parboxrestore + 
\normalsize + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} +\long\def\@makecaption#1#2{% + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bf #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + {\bf #1.} #2\par + \else + \global \@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} +%\newcommand\@pnumwidth{1.55em} +%\newcommand\@tocrmarg{2.55em} +%\newcommand\@dotsep{4.5} +%\setcounter{tocdepth}{3} +%\newcommand\tableofcontents{% +% \section*{\contentsname +% \@mkboth{% +% \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% +% \@starttoc{toc}% +% } +%\newcommand*\l@part[2]{% +% \ifnum \c@tocdepth >-2\relax +% \addpenalty\@secpenalty +% \addvspace{2.25em \@plus\p@}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% {\leavevmode +% \large \bfseries #1\hfil \hb@xt@\@pnumwidth{\hss #2}}\par +% \nobreak +% \if@compatibility +% \global\@nobreaktrue +% \everypar{\global\@nobreakfalse\everypar{}}% +% \fi +% \endgroup +% \fi} +%\newcommand*\l@section[2]{% +% \ifnum \c@tocdepth >\z@ +% \addpenalty\@secpenalty +% \addvspace{1.0em \@plus\p@}% +% \setlength\@tempdima{1.5em}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% \leavevmode \bfseries +% \advance\leftskip\@tempdima +% \hskip -\leftskip +% #1\nobreak\hfil \nobreak\hb@xt@\@pnumwidth{\hss #2}\par +% \endgroup +% \fi} 
+%\newcommand*\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} +%\newcommand*\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} +%\newcommand*\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} +%\newcommand*\l@subparagraph{\@dottedtocline{5}{10em}{5em}} +%\newcommand\listoffigures{% +% \section*{\listfigurename +% \@mkboth{\MakeUppercase\listfigurename}% +% {\MakeUppercase\listfigurename}}% +% \@starttoc{lof}% +% } +%\newcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} +%\newcommand\listoftables{% +% \section*{\listtablename +% \@mkboth{% +% \MakeUppercase\listtablename}{\MakeUppercase\listtablename}}% +% \@starttoc{lot}% +% } +%\let\l@table\l@figure +%====================================== +%ENVIRONMENTS +%====================================== +%ENVIRONMENT: indented +\newenvironment{indented}{\begin{indented}}{\end{indented}} +\newenvironment{varindent}[1]{\begin{varindent}{#1}}{\end{varindent}} +% +\def\indented{\list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\mathindent\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endindented=\endlist +\def\varindent#1{\setlength{\varind}{#1}% + \list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\varind\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endvarindent=\endlist +%ENVIRONMENT: abstract +\newenvironment{abstract}{% + \vspace{16pt plus3pt minus3pt} + \begin{indented} + \item[]{\bfseries \abstractname.}\quad\rm\ignorespaces} + {\end{indented}\vspace{10mm}} +%ENVIRONMENT: description +\newenvironment{description} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} +%ENVIRONMENT: quotation +\newenvironment{quotation} + {\list{}{\listparindent 1.5em% + \itemindent \listparindent + \rightmargin \leftmargin + \parsep \z@ \@plus\p@}% + \item[]} + {\endlist} 
+%ENVIRONMENT: quote +\newenvironment{quote} + {\list{}{\rightmargin\leftmargin}% + \item[]} + {\endlist} +%ENVIRONMENT: verse +\newenvironment{verse} + {\let\\=\@centercr + \list{}{\itemsep \z@ + \itemindent -1.5em% + \listparindent\itemindent + \rightmargin \leftmargin + \advance\leftmargin 1.5em}% + \item[]} + {\endlist} +%ENVIRONMENT: bibliography +\newdimen\bibindent +\setlength\bibindent{1.5em} +\def\thebibliography#1{\list + {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@ + \labelsep=5\p@\itemindent=-10\p@ + \settowidth\labelwidth{\footnotesize[#1]}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \advance\leftmargin -\itemindent + \usecounter{enumi}}\footnotesize + \def\newblock{\ } + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist +\def\numrefs#1{\begin{thebibliography}{#1}} +\def\endnumrefs{\end{thebibliography}} +\let\endbib=\endnumrefs +%%%%%%%%%%%%%%%%%% + + +%\newenvironment{thebibliography}[1] +% {\section*{References} +% \list{\@biblabel{\@arabic\c@enumiv}}% +% {\settowidth\labelwidth{\@biblabel{#1}}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \@openbib@code +% \usecounter{enumiv}% +% \let\p@enumiv\@empty +% \renewcommand\theenumiv{\@arabic\c@enumiv}}% +% \sloppy +% \clubpenalty4000 +% \@clubpenalty \clubpenalty +% \widowpenalty4000% +% \sfcode`\.\@m} +% {\def\@noitemerr +% {\@latex@warning{Empty `thebibliography' environment}}% +% \endlist} +%\newcommand\newblock{\hskip .11em\@plus.33em\@minus.07em} +%\let\@openbib@code\@empty +%ENVIRONMENT: theindex +\newenvironment{theindex} + {\if@twocolumn + \@restonecolfalse + \else + \@restonecoltrue + \fi + \columnseprule \z@ + \columnsep 35\p@ + \twocolumn[\section*{\indexname}]% + \@mkboth{\MakeUppercase\indexname}% + {\MakeUppercase\indexname}% + \thispagestyle{plain}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\@idxitem} + {\if@restonecol\onecolumn\else\clearpage\fi} 
+\newcommand\@idxitem{\par\hangindent 40\p@} +\newcommand\subitem{\@idxitem \hspace*{20\p@}} +\newcommand\subsubitem{\@idxitem \hspace*{30\p@}} +\newcommand\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax} +%===================== +\def\appendix{\@ifnextchar*{\@appendixstar}{\@appendix}} +\def\@appendix{\eqnobysec\@appendixstar} +\def\@appendixstar{\@@par + \ifnumbysec % Added 30/4/94 to get Table A1, + \@addtoreset{table}{section} % Table B1 etc if numbering by + \@addtoreset{figure}{section}\fi % section + \setcounter{section}{0} + \setcounter{subsection}{0} + \setcounter{subsubsection}{0} + \setcounter{equation}{0} + \setcounter{figure}{0} + \setcounter{table}{0} + \def\thesection{Appendix \Alph{section}} + \def\theequation{\ifnumbysec + \Alph{section}.\arabic{equation}\else + \Alph{section}\arabic{equation}\fi} % Comment A\arabic{equation} maybe + \def\thetable{\ifnumbysec % better? 15/4/95 + \Alph{section}\arabic{table}\else + A\arabic{table}\fi} + \def\thefigure{\ifnumbysec + \Alph{section}\arabic{figure}\else + A\arabic{figure}\fi}} +\def\noappendix{\setcounter{figure}{0} + \setcounter{table}{0} + \def\thetable{\arabic{table}} + \def\thefigure{\arabic{figure}}} +\setlength\arraycolsep{5\p@} +\setlength\tabcolsep{6\p@} +\setlength\arrayrulewidth{.4\p@} +\setlength\doublerulesep{2\p@} +\setlength\tabbingsep{\labelsep} +\skip\@mpfootins = \skip\footins +\setlength\fboxsep{3\p@} +\setlength\fboxrule{.4\p@} +\renewcommand\theequation{\arabic{equation}} +% NAME OF STRUCTURES +\newcommand\contentsname{Contents} +\newcommand\listfigurename{List of Figures} +\newcommand\listtablename{List of Tables} +\newcommand\refname{References} +\newcommand\indexname{Index} +\newcommand\figurename{Figure} +\newcommand\tablename{Table} +\newcommand\partname{Part} +\newcommand\appendixname{Appendix} +\newcommand\abstractname{Abstract} +%Miscellaneous commands +\newcommand{\BibTeX}{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em + 
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} +\newcommand{\jpcsit}{{\bfseries\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\jpcs}{{\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\iopp}{IOP Publishing} +\newcommand{\cls}{{\upshape\selectfont\texttt{jpconf.cls}}} +\newcommand{\corg}{conference organizer} +\newcommand\today{\number\day\space\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\year} + \setlength\columnsep{10\p@} +\setlength\columnseprule{0\p@} +\newcommand{\Tables}{\clearpage\section*{Tables and table captions} +\def\fps@table{hp}\noappendix} +\newcommand{\Figures}{\clearpage\section*{Figure captions} + \def\fps@figure{hp}\noappendix} +% +\newcommand{\Figure}[1]{\begin{figure} + \caption{#1} + \end{figure}} +% +\newcommand{\Table}[1]{\begin{table} + \caption{#1} + \begin{indented} + \lineup + \item[]\begin{tabular}{@{}l*{15}{l}}} +\def\endTable{\end{tabular}\end{indented}\end{table}} +\let\endtab=\endTable +% +\newcommand{\fulltable}[1]{\begin{table} + \caption{#1} + \lineup + \begin{tabular*}{\textwidth}{@{}l*{15}{@{\extracolsep{0pt plus 12pt}}l}}} +\def\endfulltable{\end{tabular*}\end{table}} +%BIBLIOGRAPHY and References +%\newcommand{\Bibliography}[1]{\section*{References}\par\numrefs{#1}} +%\newcommand{\References}{\section*{References}\par\refs} +%\def\thebibliography#1{\list +% {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ +% \partopsep=0\p@\itemsep=0\p@ +% \labelsep=5\p@\itemindent=-10\p@ +% \settowidth\labelwidth{\footnotesize[#1]}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \advance\leftmargin -\itemindent +% \usecounter{enumi}}\footnotesize +% \def\newblock{\ } +% \sloppy\clubpenalty4000\widowpenalty4000 +% \sfcode`\.=1000\relax} +%\let\endthebibliography=\endlist +%\def\numrefs#1{\begin{thebibliography}{#1}} +%\def\endnumrefs{\end{thebibliography}} 
+%\let\endbib=\endnumrefs + +\def\thereferences{\list{}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@\labelsep=0\p@\itemindent=-18\p@ +\labelwidth=0\p@\leftmargin=18\p@ +}\footnotesize\rm +\def\newblock{\ } +\sloppy\clubpenalty4000\widowpenalty4000 +\sfcode`\.=1000\relax}% +\let\endthereferences=\endlist +% MISC EQUATRION STUFF +%\def\[{\relax\ifmmode\@badmath\else +% \begin{trivlist} +% \@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +% \item[]\leavevmode +% \hbox to\linewidth\bgroup$ \displaystyle +% \hskip\mathindent\bgroup\fi} +%\def\]{\relax\ifmmode \egroup $\hfil \egroup \end{trivlist}\else \@badmath \fi} +%\def\equation{\@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +%\refstepcounter{equation}\trivlist \item[]\leavevmode +% \hbox to\linewidth\bgroup $ \displaystyle +%\hskip\mathindent} +%\def\endequation{$\hfil \displaywidth\linewidth\@eqnnum\egroup \endtrivlist} +%\@namedef{equation*}{\[} +%\@namedef{endequation*}{\]} +%\def\eqnarray{\stepcounter{equation}\let\@currentlabel=\theequation +%\global\@eqnswtrue +%\global\@eqcnt\z@\tabskip\mathindent\let\\=\@eqncr +%\abovedisplayskip\topsep\ifvmode\advance\abovedisplayskip\partopsep\fi +%\belowdisplayskip\abovedisplayskip +%\belowdisplayshortskip\abovedisplayskip +%\abovedisplayshortskip\abovedisplayskip +%$$\halign to +%\linewidth\bgroup\@eqnsel$\displaystyle\tabskip\z@ +% {##{}}$&\global\@eqcnt\@ne $\displaystyle{{}##{}}$\hfil +% &\global\@eqcnt\tw@ $\displaystyle{{}##}$\hfil +% \tabskip\@centering&\llap{##}\tabskip\z@\cr} +%\def\endeqnarray{\@@eqncr\egroup +% \global\advance\c@equation\m@ne$$\global\@ignoretrue } +%\mathindent = 6pc +%% +%\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th +% \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil +% \crcr#1\crcr}}\,} +%% +%\def\eqalignno#1{\displ@y \tabskip\z@skip +% \halign to\displaywidth{\hspace{5pc}$\@lign\displaystyle{##}$% +% \tabskip\z@skip +% 
&$\@lign\displaystyle{{}##}$\hfill\tabskip\@centering +% &\llap{$\@lign\hbox{\rm##}$}\tabskip\z@skip\crcr +% #1\crcr}} +%% +\newif\ifnumbysec +\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi} +\def\eqnobysec{\numbysectrue\@addtoreset{equation}{section}} +\newcounter{eqnval} +\def\numparts{\addtocounter{equation}{1}% + \setcounter{eqnval}{\value{equation}}% + \setcounter{equation}{0}% + \def\theequation{\ifnumbysec + \arabic{section}.\arabic{eqnval}{\it\alph{equation}}% + \else\arabic{eqnval}{\it\alph{equation}}\fi}} +\def\endnumparts{\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi}% + \setcounter{equation}{\value{eqnval}}} +% +\def\cases#1{% + \left\{\,\vcenter{\def\\{\cr}\normalbaselines\openup1\jot\m@th% + \ialign{\strut$\displaystyle{##}\hfil$&\tqs + \rm##\hfil\crcr#1\crcr}}\right.}% +\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th + \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil + \crcr#1\crcr}}\,} +% OTHER USEFUL BITS +\newcommand{\e}{\mathrm{e}} +\newcommand{\rme}{\mathrm{e}} +\newcommand{\rmi}{\mathrm{i}} +\newcommand{\rmd}{\mathrm{d}} +\renewcommand{\qquad}{\hspace*{25pt}} +\newcommand{\tdot}[1]{\stackrel{\dots}{#1}} % Added 1/9/94 +\newcommand{\tqs}{\hspace*{25pt}} +\newcommand{\fl}{\hspace*{-\mathindent}} +\newcommand{\Tr}{\mathop{\mathrm{Tr}}\nolimits} +\newcommand{\tr}{\mathop{\mathrm{tr}}\nolimits} +\newcommand{\Or}{\mathord{\mathrm{O}}} %changed from \mathop 20/1/95 +\newcommand{\lshad}{[\![} +\newcommand{\rshad}{]\!]} +\newcommand{\case}[2]{{\textstyle\frac{#1}{#2}}} +\def\pt(#1){({\it #1\/})} +\newcommand{\dsty}{\displaystyle} +\newcommand{\tsty}{\textstyle} +\newcommand{\ssty}{\scriptstyle} +\newcommand{\sssty}{\scriptscriptstyle} +\def\lo#1{\llap{${}#1{}$}} +\def\eql{\llap{${}={}$}} +\def\lsim{\llap{${}\sim{}$}} +\def\lsimeq{\llap{${}\simeq{}$}} +\def\lequiv{\llap{${}\equiv{}$}} +% +\newcommand{\eref}[1]{(\ref{#1})} 
+%\newcommand{\eqref}[1]{Equation (\ref{#1})} +%\newcommand{\Eqref}[1]{Equation (\ref{#1})} +\newcommand{\sref}[1]{section~\ref{#1}} +\newcommand{\fref}[1]{figure~\ref{#1}} +\newcommand{\tref}[1]{table~\ref{#1}} +\newcommand{\Sref}[1]{Section~\ref{#1}} +\newcommand{\Fref}[1]{Figure~\ref{#1}} +\newcommand{\Tref}[1]{Table~\ref{#1}} +\newcommand{\opencircle}{\mbox{\Large$\circ\,$}} % moved Large outside maths +\newcommand{\opensquare}{\mbox{$\rlap{$\sqcap$}\sqcup$}} +\newcommand{\opentriangle}{\mbox{$\triangle$}} +\newcommand{\opentriangledown}{\mbox{$\bigtriangledown$}} +\newcommand{\opendiamond}{\mbox{$\diamondsuit$}} +\newcommand{\fullcircle}{\mbox{{\Large$\bullet\,$}}} % moved Large outside maths +\newcommand{\fullsquare}{\,\vrule height5pt depth0pt width5pt} +\newcommand{\dotted}{\protect\mbox{${\mathinner{\cdotp\cdotp\cdotp\cdotp\cdotp\cdotp}}$}} +\newcommand{\dashed}{\protect\mbox{-\; -\; -\; -}} +\newcommand{\broken}{\protect\mbox{-- -- --}} +\newcommand{\longbroken}{\protect\mbox{--- --- ---}} +\newcommand{\chain}{\protect\mbox{--- $\cdot$ ---}} +\newcommand{\dashddot}{\protect\mbox{--- $\cdot$ $\cdot$ ---}} +\newcommand{\full}{\protect\mbox{------}} + +\def\;{\protect\psemicolon} +\def\psemicolon{\relax\ifmmode\mskip\thickmuskip\else\kern .3333em\fi} +\def\lineup{\def\0{\hbox{\phantom{0}}}% + \def\m{\hbox{$\phantom{-}$}}% + \def\-{\llap{$-$}}} +% +%%%%%%%%%%%%%%%%%%%%% +% Tables rules % +%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\boldarrayrulewidth}{1\p@} +% Width of bold rule in tabular environment. 
+ +\def\bhline{\noalign{\ifnum0=`}\fi\hrule \@height +\boldarrayrulewidth \futurelet \@tempa\@xhline} + +\def\@xhline{\ifx\@tempa\hline\vskip \doublerulesep\fi + \ifnum0=`{\fi}} + +% +% Rules for tables with extra space around +% +\newcommand{\br}{\ms\bhline\ms} +\newcommand{\mr}{\ms\hline\ms} +% +\newcommand{\centre}[2]{\multispan{#1}{\hfill #2\hfill}} +\newcommand{\crule}[1]{\multispan{#1}{\hspace*{\tabcolsep}\hrulefill + \hspace*{\tabcolsep}}} +\newcommand{\fcrule}[1]{\ifnum\thetabtype=1\multispan{#1}{\hrulefill + \hspace*{\tabcolsep}}\else\multispan{#1}{\hrulefill}\fi} +% +% Extra spaces for tables and displayed equations +% +\newcommand{\ms}{\noalign{\vspace{3\p@ plus2\p@ minus1\p@}}} +\newcommand{\bs}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}} +\newcommand{\ns}{\noalign{\vspace{-3\p@ plus-1\p@ minus-1\p@}}} +\newcommand{\es}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}\displaystyle}% +% +\newcommand{\etal}{{\it et al\/}\ } +\newcommand{\dash}{------} +\newcommand{\nonum}{\par\item[]} %\par added 1/9/93 +\newcommand{\mat}[1]{\underline{\underline{#1}}} +% +% abbreviations for IOPP journals +% +\newcommand{\CQG}{{\it Class. Quantum Grav.} } +\newcommand{\CTM}{{\it Combust. Theory Modelling\/} } +\newcommand{\DSE}{{\it Distrib. Syst. Engng\/} } +\newcommand{\EJP}{{\it Eur. J. Phys.} } +\newcommand{\HPP}{{\it High Perform. Polym.} } % added 4/5/93 +\newcommand{\IP}{{\it Inverse Problems\/} } +\newcommand{\JHM}{{\it J. Hard Mater.} } % added 4/5/93 +\newcommand{\JO}{{\it J. Opt.} } +\newcommand{\JOA}{{\it J. Opt. A: Pure Appl. Opt.} } +\newcommand{\JOB}{{\it J. Opt. B: Quantum Semiclass. Opt.} } +\newcommand{\JPA}{{\it J. Phys. A: Math. Gen.} } +\newcommand{\JPB}{{\it J. Phys. B: At. Mol. Phys.} } %1968-87 +\newcommand{\jpb}{{\it J. Phys. B: At. Mol. Opt. Phys.} } %1988 and onwards +\newcommand{\JPC}{{\it J. Phys. C: Solid State Phys.} } %1968--1988 +\newcommand{\JPCM}{{\it J. Phys.: Condens. Matter\/} } %1989 and onwards +\newcommand{\JPD}{{\it J. Phys. D: Appl. 
Phys.} } +\newcommand{\JPE}{{\it J. Phys. E: Sci. Instrum.} } +\newcommand{\JPF}{{\it J. Phys. F: Met. Phys.} } +\newcommand{\JPG}{{\it J. Phys. G: Nucl. Phys.} } %1975--1988 +\newcommand{\jpg}{{\it J. Phys. G: Nucl. Part. Phys.} } %1989 and onwards +\newcommand{\MSMSE}{{\it Modelling Simulation Mater. Sci. Eng.} } +\newcommand{\MST}{{\it Meas. Sci. Technol.} } %1990 and onwards +\newcommand{\NET}{{\it Network: Comput. Neural Syst.} } +\newcommand{\NJP}{{\it New J. Phys.} } +\newcommand{\NL}{{\it Nonlinearity\/} } +\newcommand{\NT}{{\it Nanotechnology} } +\newcommand{\PAO}{{\it Pure Appl. Optics\/} } +\newcommand{\PM}{{\it Physiol. Meas.} } % added 4/5/93 +\newcommand{\PMB}{{\it Phys. Med. Biol.} } +\newcommand{\PPCF}{{\it Plasma Phys. Control. Fusion\/} } % added 4/5/93 +\newcommand{\PSST}{{\it Plasma Sources Sci. Technol.} } +\newcommand{\PUS}{{\it Public Understand. Sci.} } +\newcommand{\QO}{{\it Quantum Opt.} } +\newcommand{\QSO}{{\em Quantum Semiclass. Opt.} } +\newcommand{\RPP}{{\it Rep. Prog. Phys.} } +\newcommand{\SLC}{{\it Sov. Lightwave Commun.} } % added 4/5/93 +\newcommand{\SST}{{\it Semicond. Sci. Technol.} } +\newcommand{\SUST}{{\it Supercond. Sci. Technol.} } +\newcommand{\WRM}{{\it Waves Random Media\/} } +\newcommand{\JMM}{{\it J. Micromech. Microeng.\/} } +% +% Other commonly quoted journals +% +\newcommand{\AC}{{\it Acta Crystallogr.} } +\newcommand{\AM}{{\it Acta Metall.} } +\newcommand{\AP}{{\it Ann. Phys., Lpz.} } +\newcommand{\APNY}{{\it Ann. Phys., NY\/} } +\newcommand{\APP}{{\it Ann. Phys., Paris\/} } +\newcommand{\CJP}{{\it Can. J. Phys.} } +\newcommand{\JAP}{{\it J. Appl. Phys.} } +\newcommand{\JCP}{{\it J. Chem. Phys.} } +\newcommand{\JJAP}{{\it Japan. J. Appl. Phys.} } +\newcommand{\JP}{{\it J. Physique\/} } +\newcommand{\JPhCh}{{\it J. Phys. Chem.} } +\newcommand{\JMMM}{{\it J. Magn. Magn. Mater.} } +\newcommand{\JMP}{{\it J. Math. Phys.} } +\newcommand{\JOSA}{{\it J. Opt. Soc. Am.} } +\newcommand{\JPSJ}{{\it J. Phys. Soc. 
Japan\/} } +\newcommand{\JQSRT}{{\it J. Quant. Spectrosc. Radiat. Transfer\/} } +\newcommand{\NC}{{\it Nuovo Cimento\/} } +\newcommand{\NIM}{{\it Nucl. Instrum. Methods\/} } +\newcommand{\NP}{{\it Nucl. Phys.} } +\newcommand{\PL}{{\it Phys. Lett.} } +\newcommand{\PR}{{\it Phys. Rev.} } +\newcommand{\PRL}{{\it Phys. Rev. Lett.} } +\newcommand{\PRS}{{\it Proc. R. Soc.} } +\newcommand{\PS}{{\it Phys. Scr.} } +\newcommand{\PSS}{{\it Phys. Status Solidi\/} } +\newcommand{\PTRS}{{\it Phil. Trans. R. Soc.} } +\newcommand{\RMP}{{\it Rev. Mod. Phys.} } +\newcommand{\RSI}{{\it Rev. Sci. Instrum.} } +\newcommand{\SSC}{{\it Solid State Commun.} } +\newcommand{\ZP}{{\it Z. Phys.} } +%=================== +\pagestyle{headings} +\pagenumbering{arabic} +\raggedbottom +\onecolumn +\endinput +%% +%% End of file `jconf.cls'. diff --git a/contributions/ds_devops_pe/Artifact/jpconf11.clo b/contributions/ds_devops_pe/Artifact/jpconf11.clo new file mode 100644 index 0000000000000000000000000000000000000000..63541cbb98638b86bbc1df2d09f4eafbe3233a42 --- /dev/null +++ b/contributions/ds_devops_pe/Artifact/jpconf11.clo @@ -0,0 +1,141 @@ +%% +%% This is file `jpconf11.clo' +%% +%% This file is distributed in the hope that it will be useful, +%% but WITHOUT ANY WARRANTY; without even the implied warranty of +%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +\ProvidesFile{jpconf11.clo}[2005/05/04 v1.0 LaTeX2e file (size option)] +\renewcommand\normalsize{% + \@setfontsize\normalsize\@xipt{13}% + \abovedisplayskip 12\p@ \@plus3\p@ \@minus7\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \belowdisplayskip \abovedisplayskip + \let\@listi\@listI} +\normalsize +\newcommand\small{% + \@setfontsize\small\@xpt{12}% + \abovedisplayskip 11\p@ \@plus3\p@ \@minus6\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 9\p@ \@plus3\p@ \@minus5\p@ + \parsep 4.5\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip} +\newcommand\footnotesize{% +% \@setfontsize\footnotesize\@xpt\@xiipt + \@setfontsize\footnotesize\@ixpt{11}% + \abovedisplayskip 10\p@ \@plus2\p@ \@minus5\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6\p@ \@plus3\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 6\p@ \@plus2\p@ \@minus2\p@ + \parsep 3\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip +} +\newcommand\scriptsize{\@setfontsize\scriptsize\@viiipt{9.5}} +\newcommand\tiny{\@setfontsize\tiny\@vipt\@viipt} +\newcommand\large{\@setfontsize\large\@xivpt{18}} +\newcommand\Large{\@setfontsize\Large\@xviipt{22}} +\newcommand\LARGE{\@setfontsize\LARGE\@xxpt{25}} +\newcommand\huge{\@setfontsize\huge\@xxvpt{30}} +\let\Huge=\huge +\if@twocolumn + \setlength\parindent{14\p@} + \else + \setlength\parindent{18\p@} +\fi +\if@letterpaper% +%\input{letmarg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} 
+\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\else +%\input{a4marg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} +\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\fi +\setlength\maxdepth{.5\topskip} +\setlength\@maxdepth\maxdepth +\setlength\footnotesep{8.4\p@} +\setlength{\skip\footins} {10.8\p@ \@plus 4\p@ \@minus 2\p@} +\setlength\floatsep {14\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\textfloatsep {24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {16\p@ \@plus 4\p@ \@minus 4\p@} +\setlength\dblfloatsep {16\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\dbltextfloatsep{24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\@fptop{0\p@} +\setlength\@fpsep{10\p@ \@plus 1fil} +\setlength\@fpbot{0\p@} +\setlength\@dblfptop{0\p@} +\setlength\@dblfpsep{10\p@ \@plus 1fil} +\setlength\@dblfpbot{0\p@} +\setlength\partopsep{3\p@ \@plus 2\p@ \@minus 2\p@} +\def\@listI{\leftmargin\leftmargini + \parsep=\z@ + \topsep=6\p@ \@plus3\p@ \@minus3\p@ + \itemsep=3\p@ \@plus2\p@ \@minus1\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep + \topsep=3\p@ \@plus2\p@ \@minus\p@ + \parsep=\z@ + \itemsep=\parsep} +\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep=\z@ + \parsep=\z@ + \partopsep=\z@ + \itemsep=\z@} +\def\@listiv {\leftmargin\leftmarginiv + \labelwidth\leftmarginiv + \advance\labelwidth-\labelsep} +\def\@listv{\leftmargin\leftmarginv + \labelwidth\leftmarginv + \advance\labelwidth-\labelsep} +\def\@listvi {\leftmargin\leftmarginvi + 
\labelwidth\leftmarginvi + \advance\labelwidth-\labelsep} +\endinput +%% +%% End of file `iopart12.clo'. diff --git a/contributions/ds_devops_pe/CI-tools.png b/contributions/ds_devops_pe/CI-tools.png new file mode 100644 index 0000000000000000000000000000000000000000..87bf09f581de94df2711caab507dbb66ae040500 Binary files /dev/null and b/contributions/ds_devops_pe/CI-tools.png differ diff --git a/contributions/ds_devops_pe/ds_devops_pe.tex b/contributions/ds_devops_pe/ds_devops_pe.tex index deb9d6a911c20706f7a542792d6a2c2197550177..094930ec3332d349511cebf9d313a41fcc9cd88f 100644 --- a/contributions/ds_devops_pe/ds_devops_pe.tex +++ b/contributions/ds_devops_pe/ds_devops_pe.tex @@ -1,154 +1,238 @@ \documentclass[a4paper]{jpconf} \usepackage{graphicx} \begin{document} -\title{Common software lifecycle management in external projects: placeholder} +\title{Common software lifecycle management in external projects} \author{C. Duma$^1$, A. Costantini$^1$, D. Michelotto$^1$, - P. Orviz$^2$ and D. Salomoni$^1$} -\address{$^1$INFN Division CNAF, Bologna, Italy} -\address{$^2$IFCA, Consejo Superior de Investigaciones Cientificas-CSIC, Santander, Spain} + P. Orviz$^2$, D. Salomoni$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} +\address{$^2$ IFCA, Consejo Superior de Investigaciones Cientificas-CSIC, Santander, SP} \ead{ds@cnaf.infn.it} \begin{abstract} - This paper describes the achievements of the H2020 project INDIGO-DataCloud in the field of software lifecycle management, the Continuous Integration and Delivery systems setup to manage the new releases, as a first step towards the implementation of a DevOps approach. 
+This paper describes the common procedure defined and adopted in the field of Software Lifecycle Management and + Continuous Integration and Delivery to manage the new releases, as a first step to ensure +the quality of the provided solutions, services and components, while strengthening the collaboration between +developers and operations teams among different external projects. +In particular, the paper analyses the common software lifecycle management procedure developed during the +INDIGO-DataCloud project and recently improved and adopted in two EC funded +projects: eXtreme DataCloud and DEEP Hybrid DataCloud. \end{abstract} \section{Introduction} -Text -\section{First section} -\label{sec:release} +The eXtreme-DataCloud (XDC) \cite{xdc} and DEEP-HybridDataCloud (DEEP-HDC) \cite{deep} projects are aimed at addressing requirements from a wide range of User Communities belonging to several disciplines and test the developed software solutions against the real life use cases. +The software solutions carried out by both projects are released as Open Source and are based on already existing components (TRL8+) that the projects will enrich with new functionalities and plugins. -TOCHANGE -The software development lifecycle (SDL) process (Figure~\ref{fig:1}) in INDIGO has been supported by a continuous -software improvement process that regarded the software quality assurance, software maintenance, -including release management, support services, and the management of pilot infrastructures -needed for software integration and acceptance testing. 
- -%\begin{figure} -% \centering -% \includegraphics[width=\textwidth]{Figure5.pdf} -% \caption{Software development lifecycle implementation} -% \label{fig:1} -%\end{figure} +The use of standards and protocols widely available on the state-of-the-art distributed computing ecosystems may not be enough to guarantee that the released components can be easily plugged into the European e-Infrastructures and in general on cloud based computing environments and the definition and implementation of the entire Software +Lifecycle Management process becomes mandatory in such projects. +As the software components envisaged by both projects have a history of development + in previous successful European projects (such as the INDIGO-DataCloud \cite{indigo} project) implementing different types of modern software development techniques, + the natural choice was to complement the previous, individual, Continuous Development and Integration services +with a Continuous Testing, Deployment and Monitoring as part of a DevOps approach: +\begin{itemize} +\item Continuous Testing - the activity of continuously testing the developed software in order to identify issues in + the early phases of the development. For Continuous testing, automation tools will be used. These tools enable +the QA’s for testing multiple code-bases and in parallel, to ensure that there are no flaws in the functionality. In +this activity the use of Docker containers for simulating testing environments on the fly, is also a preferred choice. +Once the code is tested, it is continuously integrated with the existing code. +\item Continuous Deployment - the activity of continuously updating production environment once new code is made +available. Here we ensure that the code is correctly deployed on all the servers. If there is any addition of functionality + or a new feature is introduced, then one should be ready to add resources according to needs. 
Therefore, it is also +the responsibility of the SysAdmin to scale up the servers. Since the new code is deployed on a continuous basis, +automation tools play an important role for executing tasks quickly and frequently. Puppet, Chef, SaltStack and +Ansible are some popular tools that could be used at this step. This activity represents the Configuration +Management - the process of standardising the resources configurations and enforcing their state across + infrastructures in an automated manner. The extensive use of containerisation techniques will provide an +entire runtime environment: application/service, all its dependencies, libraries and binaries, and configuration +files needed to run it, bundled in one package - container. T3.1 will also manage the scalability testing, being + able to manage the configurations and do the deployments of any number of nodes automatically. +\item Continuous Monitoring - very crucial activity in the DevOps model of managing software lifecycle, which is +aimed at improving the quality of the software by monitoring its performance. This practice involves the participation +of the Operations team who will monitor the users’ activity to discover bugs or improper behavior of the system. + This can also be achieved by making use of dedicated monitoring tools, which will continuously monitor the application +performance and highlight issues. Some popular tools useful in this step are Nagios \cite{nagios}, NewRelic \cite{newrelic} +and Sensu \cite{sensu}. These tools + help to monitor the health of the system proactively and improve productivity and increase the reliability of the +systems, reducing IT support costs. Any major issues found could be reported to the Development team to be +fixed in the continuous development phase. +\end{itemize} -Preview releases are made available for evaluation by user communities and -resource providers through the pilot infrastructures. 
Release -candidates are subjected to integration testing, which may include the +These DevOps activities are carried out in a loop continuously until the desired product quality is achieved. +Automation will play a central role in all the activities in order to achieve a complete release automation, moving the +software from the developers through build and quality assurance checks, to deployment into integration testbeds +and finally to production sites part of the Pilot Infrastructures. +In the following sections, an overview of the recently defined best practices that have been adopted in both XDC and DEEP-HDC projects for the Software Lifecycle Management and Continuous Integration and Delivery is presented and described. -%\subsection{Software development lifecycle management} -Software lifecycle management is performed mostly via automated actions orchestrated. +\section{Software Quality Assurance and Control} -In Figure we depict the project's software lifecycle management services and -activities and their interdependencies: -\begin{itemize} -\item Version Control System (VCS) - Source Code is made available through public VCS - repositories, hosted externally in GitHub repositories, guaranteeing in this - way the software openness and visibility, simplifying the exploitation beyond the - project lifetime. The INDIGO-DataCloud software is released under the Apache 2.0 - software license and can be deployed on both public and private Cloud infrastructures. -\item Software quality assurence criteria and control activities and services to enable them: +Software Quality Assurance (SQA) covers the set of software engineering processes +that foster the quality and reliability in the software produced. The activities involved in this task are mainly focused on: \begin{itemize}
-\item Code review service using GitHub: Code review of software source code is one integral part of the SQA\@. This service facilitates the code review proces. It records the - comments and allows the reviewer to verify the software modification. -\item Code metrics services using {\bf Grimoire}: To collect and visualize several metrics about the software components. +\item Defining and maintaining a common SQA procedure to guide the software development efforts throughout its life cycle. +\item Formulating a representative set of metrics for the software quality control to follow up on the behavior of the +software produced, aiming to detect and fix early deviations in the software produced. +\item Enabling a continuous integration process, eventually complemented by a continuous delivery scenario, promoting +the automation adoption for the testing, building, deployment and release activities. \end{itemize} -\item Software release and maintenance activities, services and supporting infrastructures + +In order to define the SQA process, the specific context of the software developed in the project has to be taken into account. +The following particularities characterize the corresponding development teams: \begin{itemize} -\item A project management service using {\bf openproject.org} is made available by the - project: It provides tools such as an issue tracker, wiki, a placeholder for documents and a project management timeline. -\item Artifacts repositories for RPM and Debian packages, and Docker Hub for containers: - In INDIGO-DataCloud there are two types of artifacts, packaged software and virtual images. - The software can be downloaded from our public repository\footnote{http://repo.indigo-datacloud.eu}. -\item Release notes, installation and configuration guides, user and development manuals are made - available on {\bf GitBook}\footnote{https://indigo-dc.gitbooks.io/indigo-datacloud-releases}. 
-\item Bug trackers using GitHub issues tracker: Services to track issues and bugs of INDIGO-DataCloud software components. -\item Integration infrastructure: this infrastructure is composed of computing resources to support directly - the Continuous Integration service. It's the place where building and packaging of software - occurs as well as the execution of unit and functional tests. These resources are provided by INDIGO partners. -\item Testing infrastructure: this infrastructure aims to provide several types of environment. A stable environment - for users where they can preview the software and services developed by INDIGO-DataCloud, prior to its public release. -\item Preview infrastructure: where the released artifacts are deployed and made available for testing and validation by the use-cases. +\item Heterogeneous developer profiles: different backgrounds and different degrees of expertise. +\item Geographically distributed. +\item Different home institutes which implies different cultures, different development technologies, process and methods. +\item High turnover due to the limited duration of the projects where the grid software has been developed so far. +\item More focus on development activities, with limited resources, if any, available for quality assurance activities. \end{itemize} +The Quality Assurance process has to take all above described factors into account to define the Software Quality Assurance Plan (SQAP). +A set of ``QA Policies'' also have to be defined to guide the development teams towards uniform practices and processes. +These QA Policies define the main activities of the software lifecycle, such as releasing, tracking, packaging and documenting + the software carried out by the project. This is done in collaboration with development teams, making sure they are flexible +enough to co-exist as much as possible with current development methods. 
The SQA activities have to be monitored and controlled +to track their evolution and put in place corrective countermeasures in case of deviations. + +Moreover, a quality model has to be defined +to help in evaluating the software products and process quality. It helps to set quality goals for software products and processes. +The Quality Model has to follow the ISO/IEC 25010:2011 ``Systems and software engineering - Systems and software +Quality Requirements and Evaluation (SQuaRE) - System and software quality models'' \cite{R18} to identify a set of characteristics (criteria) +that need to be present in software products and processes to be able to meet the quality requirements. + +Those SQA criteria \cite{R22} have the goal to +\begin{itemize} +\item Enhance the visibility, accessibility and distribution of the produced source code through the alignment with the Open Source Definition \cite{R23}. +\item Promote code style standards to deliver good quality source code emphasizing its readability and reusability. +\item Improve the quality and reliability of software by covering different testing methods at development and pre-production stages. +\item Propose a change-based scenario where all new updates in the source code are continuously evaluated by the automated execution of the relevant tests. +\item Adopt an agile approach to effectively produce timely and audience-specific documentation. +\item Lower the barriers of software adoption by delivering quality documentation and the utilization of automated deployment solutions. +\item Encourage secure coding practices and security static analysis at the development phase while providing recommendations on external security assessment. \end{itemize} -The first INDIGO-DataCloud major release (codename {\tt MidnightBlue}) was released 1st of August 2016 (see table~\ref{tab:1} for the fact sheet). 
The -second INDIGO-DataCloud major release (codename {\tt ElectricIndigo}) was made publicly available on April 14th 2017 (see table~\ref{tab:2} for the fact sheet). +\section{Software Maintenance and Support} +Regarding the software maintenance and support area of the software lifecycle management, + the main objectives that should be defined and described in the Maintenance plan are: +\begin{itemize} +\item To increase the quality levels of the software by contributing to the implementation and automation +of the Quality Assurance (QA) and Control procedures defined by the project. +\item To boost the software delivery process, relying on automation. +\item To emphasize the communication and feedback with/from end users, in order to guarantee adequate + requirements gathering and support. +\item To guarantee the stability of services already deployed in production and the increase of their readiness + levels, where needed. +\end{itemize} +Moreover, the common practices deal with the definition of those processes and procedures related to the software maintenance and +support and their continuous execution: +\begin{itemize} +\item Software Maintenance - regarding software preparation \& transition from the developers to production +repositories and final users. +\item Problem Management - providing the analysis \& documentation of problems. +\item Change Management - control code, configuration changes, retirement calendars. +\item Coordination of the provisioning of adequate support to released software. +\item Responsibility for the release management and coordination and the maintenance of the artifacts + repositories, defining policies and release cycles. 
+\end{itemize} +The plan regarding the software maintenance and support management has to follow the guidelines of the +ISO/IEC 14764:2006 standard \cite{R30}, and includes a set of organizational and administrative roles to handle +maintenance implementation, change management and validation, software release, migration and retirement, support +and helpdesk activities. +Component releases are classified in major, minor, revision and emergency, based on the impact of the changes on the +component interface and behavior. Requests for Change (RfC) are managed adopting a priority-driven approach, +so that the risk of compromising the stability of the software deployed in a production environment is minimized. +The User Support activity deals, instead, with the coordination of the support to the users that make use of the software components (developed within the project activities) and included in the main project software distributions. + +\section{Services for continuous integration and SQA} -\section{DevOps approach in INDIGO} +To support the Software Quality Assurance, the +Continuous Integration and the software release and maintenance activities, a set of tools and services are needed. +Usually, those tools and services are provided by using publicly available cloud services due to the following reasons: +\begin{itemize} +\item Higher public visibility and in line with project objectives for open source software, +\item Provides a path to further development, support and exploitation beyond the end of the project, +\item Smaller effort needed inside the project to operate and manage those services. +\end{itemize} +The list of services needed is given in Table 1 with a small description for each service and the related Web link. -Progressive levels of automation were adopted throughout the different phases of -the INDIGO-DataCloud project software development and delivery processes. +\begin{figure}[h!] +\centering +Table 1: Tools and services to support DevOps. 
+\includegraphics[width=10cm,clip]{CI-tools.png} +%\caption{The list of services.} +\label{citools} +\end{figure} -\subsection{Services for continuous integration and SQA} -The INDIGO-DataCloud CI process is schematically shown -in Figure~\ref{fig:3}. The process, in its different steps, reflects some of -the main and important achievements of the software integration team, such as: +\section{Key Performance Indicators} +Defining appropriate KPIs for maintenance, release and support activities, and monitoring them during the project lifetime +may help in highlighting the project achievements and put in place the appropriate corrective actions in case of deviations. +In principle, the KPIs should address the following impact areas and reflect the related goal: +\begin{itemize} +\item Prepare data and computing e-Infrastructures to absorb the needs of communities that push the envelope in terms of data and intensive computing +\begin{itemize} +\item Goal: Extending the quality \& quantity of services provided by e-infrastructures +\end{itemize} +\item Promote new research possibilities in Europe \begin{itemize} - \item New features are developed independently from the - production version in \textit{feature branches}. The creation of - a pull request for a specific feature branch marks the start of - the automated validation process through the execution of the - SQA jobs. - - \item The SQA jobs perform the code style verification and calculate unit - and functional test coverage. - \begin{itemize} - \item The tools necessary for tackling these tests are packaged in - Docker images, available in DockerHub. - \item Each test then initiates a new container that provides a - clean environment for its execution. - \item This is an innovative approach that provides the flexibility - needed to cope with the INDIGO-DataCloud software diversity. 
- \end{itemize} - - \item The results of the several SQA jobs are made available in the Jenkins - service which notifies back to GitHub their exit status. - \begin{itemize} - \item Only if the tests have succeeded, the source code is - validated and is ready to be merged into the production branch. - \end{itemize} - - \item The last step in the workflow is the code review, where a human - review of the change is performed. After code review the source code - can be merged and becomes ready for integration and later release. +\item Goal: Increasing the capacity for innovation and production of new knowledge \end{itemize} +\end{itemize} + +\section{Conclusions} +The paper describes the common procedures to be applied in the field of software lifecycle management, aimed at managing + the new releases and ensuring the quality of the solutions, services and components provided by the project. +In particular, the paper describes the best practices to adopt in order to i) foster the quality and reliability of the software produced, +ii) define the processes and procedures regarding the software maintenance and support, iii) identify the services needed +to support the Software Quality Assurance, the Continuous Integration and the software release and maintenance +and iv) define appropriate KPIs to monitor the project achievements. +The adoption of different DevOps practices is becoming mandatory for +software development projects, and the experience gathered throughout this activity +can also be applicable to the development and distribution of software products coming, for example, from the user communities and other software product activities. -As a general rule, the described CI process must be followed by all the PTs -contributing code to INDIGO-DataCloud. 
However there are exceptions to this rule that fall into two main categories: -\subsection{Continuous delivery} -Continuous delivery adds, on top of the software development chain, a seamless -manufacturing of software packages ready to be deployed into production -services. Therefore, fast, frequent and small releases can be taken over thus -promoting the reliability of the software. +\section*{Acknowledgments} +DEEP-HybridDataCloud has been funded by the European Commission H2020 research and innovation program under grant agreement RIA 777435. +eXtreme DataCloud has been funded by the European Commission H2020 research and innovation program under grant agreement RIA 777367. -\subsection{DevOps adoption from user communities} -The experience gathered throughout the project with regards to the adoption of different DevOps -practices is not only useful and suitable for the software related to the core services in the -INDIGO-DataCloud solution, but also applicable to the development and distribution of the applications coming from the user communities. +\section{References} -\section{Conclusions} +\begin{thebibliography}{} -Thanks to the new common solutions developed by the INDIGO project, teams of first-line -researchers in Europe are using public and private Cloud resources to get new results in Physics, Biology, Astronomy, Medicine, Humanities and other disciplines. 
+\bibitem{xdc} +Web site: www.extreme-datacloud.eu +\bibitem{deep} +Web site: www.deep-hybrid-datacloud.eu +\bibitem{indigo} +Web site: www.indigo-datacloud.eu +\bibitem{nagios} +Web site: https://www.nagios.org +\bibitem{newrelic} +Web site: https://newrelic.com +\bibitem{sensu} +Web site: https://sensu.io +\bibitem{R18} +ISO/IEC 25010:2011, “Systems and software engineering - Systems and software Quality Requirements and Evaluation (SQuaRE) - System and software quality models": https://www.iso.org/standard/35733.html +\bibitem{R22} +A set of Common Software Quality Assurance Baseline Criteria for Research Projects, http://digital.csic.es/bitstream/10261/160086/4/CommonSQA-v2.pdf +\bibitem{R23} +The Open Source Definition, https://opensource.org/osd +\bibitem{R30} +ISO/IEC 14764:2006 standard, https://www.iso.org/standard/39064.html + + + + +\end{thebibliography} -\section*{Acknowledgments} -DEEP-HybridDataCloud has been funded by the European Commision H2020 research and innovation program under grant agreement RIA XXXXXXX. -eXtreme DataCloud has been funded by the European Commision H2020 research and innovation program under grant agreement RIA XXXXXXX. \end{document} diff --git a/contributions/ds_infn_cc/ds_infn_cc.tex b/contributions/ds_infn_cc/ds_infn_cc.tex index 4a4ae6b97a55db9413f57c49adc2c47447f5cd56..e5f1a285e9695a44699cb2f90ac8faefbe22fc61 100644 --- a/contributions/ds_infn_cc/ds_infn_cc.tex +++ b/contributions/ds_infn_cc/ds_infn_cc.tex @@ -3,9 +3,9 @@ \begin{document} \title{INFN Corporate Cloud - Management and Evolution} -\author{C. Duma$^1$, A. Costantini$^1$, D. Michelotto$^1$ and D. Salomoni$^1$} +\author{C. Duma$^1$, A. Costantini$^1$, D. Michelotto$^1$, D. 
Salomoni$^1$} -\address{$^1$INFN Division CNAF, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} %\address{$^2$IFCA, Consejo Superior de Investigaciones Cientificas-CSIC, Santander, Spain} \ead{ds@cnaf.infn.it} @@ -47,9 +47,9 @@ represents a single, though distributed, administrative domain. As already mentioned, INFN Corporate Cloud (INFN-CC) is the INFN geographically distributed private Cloud infrastructure aimed at providing services starting from the IaaS level and it is based on OpenStack that has been deployed in three of the -major INFN data-centres in Italy (INFN-CNAF, INFN-Bari and INFN-LNF). INFN-CC has a twofold purpose: on one hand its fully +major INFN data centers in Italy (INFN-CNAF, INFN-Bari and INFN-LNF). INFN-CC has a twofold purpose: on one hand its fully redundant architecture and its resiliency characteristics make of it theperfect environment for providing critical network services - for the INFN community, on the other hand the fact that it is hosted in modern and large data-centres makes of INFN-CC the + for the INFN community, on the other hand the fact that it is hosted in modern and large data centers makes of INFN-CC the platform of choice for a number of scientific computing use cases. 
INFN-CC also deploys a higher PaaS layer, developed within the EU funded project INDIGO-DataCloud \cite{indigo-dc}, in order to provide to the INFN scientific communities not only an easier access solution to computing and storage resources, but also both automatic instantiation and configuration diff --git a/contributions/farming/ARFarming2018.tex b/contributions/farming/ARFarming2018.tex index e43c66c32abcf15ba022416cbc5d7c10c2707958..6c94cbb42318b8201be60c7275f5b7eff974a5f5 100644 --- a/contributions/farming/ARFarming2018.tex +++ b/contributions/farming/ARFarming2018.tex @@ -11,22 +11,19 @@ %%%%%%%%%% End TeXmacs macros \begin{document} -\title{The INFN-Tier1: the computing farm} +\title{The INFN-Tier 1: the computing farm} -\author{Andrea Chierici} -\ead{andrea.chierici@cnaf.infn.it} +\author{Andrea Chierici$^1$, S. Dal Pra$^1$, D. Michelotto$^1$} -\author{Stefano Dal Pra} -\ead{stefano.dalpra@cnaf.infn.it} +\address{$^1$ INFN-CNAF, Bologna, IT} -\author{Diego Michelotto} -\ead{diego.michelotto@cnaf.infn.it} +\ead{andrea.chierici@cnaf.infn.it, stefano.dalpra@cnaf.infn.it, diego.michelotto@cnaf.infn.it} %\begin{abstract} %\end{abstract} \section{Introduction} -The farming group is responsible for the management of the computing resources of the centre. This implies the deployment of installation and configuration services, monitoring facilities and to fairly distribute the resources to the experiments that have agreed to run at CNAF. +The farming group is responsible for the management of the computing resources of the center. This implies the deployment of installation and configuration services, monitoring facilities and to fairly distribute the resources to the experiments that have agreed to run at CNAF. %\begin{figure} %\centering @@ -134,7 +131,7 @@ has been set up to work as master and dbfarm-2 works as a Hot standby replica. With this configuration the master is the main database, while the replica can be accessed in read only mode. 
This instance is used to host the accounting database of the farming, the inventory of -the hardware of the T1-centre (docet) and a database used by the CUPID +the hardware of the Tier 1 center (docet) and a database used by the CUPID experiment. The content of this database is updated directly by authorized users of the experiment, while jobs running on our worker nodes can access its data from the standby replica. @@ -195,7 +192,7 @@ In particular, the Buyers Group reiterated the need for a fully functioning clou \bibitem{DGAS} Dal Pra, Stefano. ``Accounting Data Recovery. A Case Report from INFN-T1'' Nota interna, Commissione Calcolo e Reti dell'INFN, {\tt CCR-48/2014/P} - \bibitem{DOCET} Dal Pra, Stefano, and Alberto Crescente. ``The data operation centre tool. Architecture and population strategies'' Journal of Physics: Conference Series. Vol. 396. No. 4. IOP Publishing, 2012. + \bibitem{DOCET} Dal Pra, Stefano, and Alberto Crescente. ``The data operation center tool. Architecture and population strategies'' Journal of Physics: Conference Series. Vol. 396. No. 4. IOP Publishing, 2012. \end{thebibliography} \end{document} diff --git a/contributions/fermi/fermi.tex b/contributions/fermi/fermi.tex index 709111fb612d1450fe6326032cdef115d219dcf4..a3c5113c25a58c5a5c71013f3a5a3d53daee4433 100644 --- a/contributions/fermi/fermi.tex +++ b/contributions/fermi/fermi.tex @@ -6,12 +6,12 @@ \title{The \Fermi-LAT experiment} \author{ -M Kuss$^{1}$, -F Longo$^{2}$, +M. Kuss$^{1}$, +F. 
Longo$^{2}$, on behalf of the \Fermi LAT collaboration} -\address{$^{1}$ Istituto Nazionale di Fisica Nucleare, Sezione di Pisa, I-56127 Pisa, Italy} -\address{$^{2}$ Department of Physics, University of Trieste, via Valerio 2, Trieste and INFN, Sezione di Trieste, via Valerio 2, Trieste, Italy} +\address{$^{1}$ INFN Sezione di Pisa, Pisa, IT} +\address{$^{2}$ University of Trieste and INFN Sezione di Trieste, Trieste, IT} \ead{michael.kuss@pi.infn.it} \begin{abstract} diff --git a/contributions/gamma/gamma.tex b/contributions/gamma/gamma.tex index ff2845b86d9b58e4dd43bee1ca0751695dacc5b9..ba2c2da15bf0454ff1c1960696761cf72811d2d2 100644 --- a/contributions/gamma/gamma.tex +++ b/contributions/gamma/gamma.tex @@ -51,7 +51,7 @@ What is the density and isospin dependence of the nuclear equation of state? \noindent AGATA \cite{ref:gamma_first,ref:gamma_second} is the European Advanced Gamma Tracking Array for nuclear spectroscopy project consisting of a full shell of high purity segmented germanium detectors. Being fully instrumented with digital electronics it exploits the novel technique of gamma-ray tracking. AGATA will be employed at all the large-scale radioactive and stable beam facilities and in the long-term will be fully completed in 60 detectors unit geometry, in order to realize the envisaged scientific program. AGATA is being realized in phases with the goal of completing the first phase with 20 units by 2020. AGATA has been successfully operated since 2009 at LNL, GSI and GANIL, taking advantage of different beams and powerful ancillary detector systems. It will be used in LNL again in 2022, with stable beams and later with SPES radioactive beams, and in future years is planned to be installed in GSI/FAIR, Jyvaskyla, GANIL again, and HIE-ISOLDE. \section{AGATA computing model and the role of CNAF} -At present the array consists of 15 units, each composed by a cluster of 3 HPGe crystals. 
Each individual crystal is composed of 36 segments for a total of 38 associated electronics channels/crystal. The data acquisition rate, including Pulse Shape Analysis, can stand up to 4/5 kHz events per crystal. The bottleneck is presently the Pulse Shape Analysis procedure to extract the interaction positions from the HPGe detectors traces. With future faster processor one expects to be able to process the PSA at 10 kHz/crystal. The amount of raw data per experiment, including traces, is about 20 TB for a standard data taking of about 1 week and can increase to 50 TB for specific experimental configuration. The collaboration is thus acquiring locally about 250 TB of data per year. During data-taking raw data is temporarily stored in a computer farm located at the experimental site and, later on, it is dispatched on the GRID in two different centers, CCIN2P3 (Lyon) and CNAF (INFN Bologna), used as TIER1: the duplication process is a security in case of failures/losses of one of the TIER1. +At present the array consists of 15 units, each composed by a cluster of 3 HPGe crystals. Each individual crystal is composed of 36 segments for a total of 38 associated electronics channels/crystal. The data acquisition rate, including Pulse Shape Analysis, can stand up to 4/5 kHz events per crystal. The bottleneck is presently the Pulse Shape Analysis procedure to extract the interaction positions from the HPGe detectors traces. With future faster processor one expects to be able to process the PSA at 10 kHz/crystal. The amount of raw data per experiment, including traces, is about 20 TB for a standard data taking of about 1 week and can increase to 50 TB for specific experimental configuration. The collaboration is thus acquiring locally about 250 TB of data per year. 
During data-taking raw data is temporarily stored in a computer farm located at the experimental site and, later on, it is dispatched on the GRID in two different centers, CCIN2P3 (Lyon) and CNAF (INFN Bologna), used as Tier 1: the duplication process is a security in case of failures/losses of one of the Tier 1 sites. The GRID itself is seldom used to re-process the data and the users usually download their data set to local storage where they can run emulators able to manage part or the full workflow. diff --git a/contributions/icarus/report_2018.tex b/contributions/icarus/report_2018.tex index a3e25dbc658c70f74792bf8c850313e2684f3d96..9aa352487eec672e0fce5920861d381a5d74e448 100644 --- a/contributions/icarus/report_2018.tex +++ b/contributions/icarus/report_2018.tex @@ -5,8 +5,8 @@ \begin{document} \title{ICARUS} -\author{A. Rappoldi, on behalf of the ICARUS Collaboration} -\address{INFN, Sez. di Pavia, via Bassi, 6, 27100 Pavia, Italy} +\author{A. Rappoldi$^1$, on behalf of the ICARUS Collaboration} +\address{$^1$ INFN Sezione di Pavia, Pavia, IT} \ead{andrea.rappoldi@pv.infn.it} @@ -210,11 +210,11 @@ is scheduled for the 2019. All the data (raw and reduced) will be stored on the Fermilab using local facility; however, the ICARUS collaboration agreed to have a mirror site in Italy -(located at CNAF INFN Tier1) where to retain a full replica of the preselected +(located at CNAF INFN Tier 1) where to retain a full replica of the preselected raw data, both to have redundancy and provide a more direct data access to european part of the collaboration. -The CNAF Tier-1 computing resources assigned to ICARUS for 2018 consist of: +The CNAF Tier 1 computing resources assigned to ICARUS for 2018 consist of: 4000 HSPEC of CPU, 500 TB of disk storage and 1500 TB of tape archive. 
A small fraction of the available storage has been used to @@ -225,7 +225,7 @@ During 2018 the ICARUS T600 detector was still in preparation, so only a limited fraction of such resorces has been used, mainly to perform data transfer tests (from FNAL to CNAF) and to check the installation of LArSoft framework -in the Tier-1 environment. For this last purpose, a dedicate virtual +in the Tier 1 environment. For this last purpose, a dedicate virtual machine with custom environment was also used. diff --git a/contributions/km3net/km3net.tex b/contributions/km3net/km3net.tex index bee6b6b61807d88e66522b22f50898cc7badf1d8..32dfda8cef924d88c8a6156c0609d697cf1a1f62 100644 --- a/contributions/km3net/km3net.tex +++ b/contributions/km3net/km3net.tex @@ -6,13 +6,13 @@ \author{C. Bozza$^1$, T. Chiarusi$^2$, K. Graf$^3$, A. Martini$^4$ for the KM3NeT Collaboration} -\address{$ˆ1$ Department of Physics of the University of Salerno and INFN Gruppo Collegato di Salerno, via Giovanni Paolo II 132, 84084 Fisciano, Italy} +\address{$ˆ1$ University of Salerno and INFN Gruppo Collegato di Salerno, Fisciano (SA), IT} -\address{$ˆ2$ INFN, Sezione di Bologna, v.le C. Berti-Pichat, 6/2, Bologna 40127, Italy} +\address{$ˆ2$ INFN Sezione di Bologna, Bologna, IT} -\address{$ˆ3$ Friedrich-Alexander-Universit{\"a}t Erlangen-N{\"u}rnberg, Erlangen Centre for Astroparticle Physics, Erwin-Rommel-Stra{\ss}e 1, 91058 Erlangen, Germany} +\address{$ˆ3$ Friedrich-Alexander-Universit{\"a}t Erlangen-N{\"u}rnberg, Erlangen, GE} -\address{$ˆ4$ INFN, LNF, Via Enrico Fermi, 40, Frascati, 00044 Italy} +\address{$ˆ4$ INFN-LNF, Frascati, IT} \ead{cbozza@unisa.it} @@ -24,7 +24,7 @@ from astrophysical sources; the ORCA programme is devoted to investigate the ordering of neutrino mass eigenstates. The unprecedented size of detectors will imply PByte-scale datasets and calls for large computing facilities and high-performance data -centres. The data management and processing challenges of KM3NeT are +centers. 
The data management and processing challenges of KM3NeT are reviewed as well as the computing model. Specific attention is given to describing the role and contributions of CNAF. \end{abstract} @@ -80,7 +80,7 @@ way. One ORCA DU was also deployed and operated in 2017, with smooth data flow and processing. At present time, most of the computing load is due to simulations for the full building block, now being enriched with feedback from real data analysis. As a first step, this -was done at CC-IN2P3 in Lyon, but usage of other computing centres is +was done at CC-IN2P3 in Lyon, but usage of other computing centers is increasing and is expected to soon spread to the full KM3NeT computing landscape. This process is being driven in accordance to the goals envisaged in setting up the computing model. The KM3NeT @@ -105,14 +105,14 @@ flow with a reduction from $5 GB/s$ to $5 MB/s$ per \emph{building block}. Quasi-on-line reconstruction is performed for selected events (alerts, monitoring). The output data are temporarily stored on a persistent medium and distributed with fixed latency (typically less -than few hours) to various computing centres, which altogether +than few hours) to various computing centers, which altogether constitute Tier 1, where events are reconstructed by various fitting models (mostly searching for shower-like or track-like patterns). Reconstruction further reduces the data rate to about $1 MB/s$ per \emph{building block}. In addition, Tier 1 also takes care of continuous detector calibration, to optimise pointing accuracy (by working out the detector shape that changes because of water currents) -and photomultiplier operation. Local analysis centres, logically +and photomultiplier operation. Local analysis centers, logically allocated in Tier 2 of the computing model, perform physics analysis tasks. 
A database system interconnects the three tiers by distributing detector structure, qualification and calibration data, run @@ -124,10 +124,10 @@ book-keeping information, and slow-control and monitoring data. \label{fig:compmodel} \end{figure} -KM3NeT exploits computing resources in several centres and in the +KM3NeT exploits computing resources in several centers and in the GRID, as sketched in Fig.~\ref{fig:compmodel}. The conceptually simple flow of the three-tier model is then realised by splitting the tasks -of Tier 1 to different processing centres, also optimising the data +of Tier 1 to different processing centers, also optimising the data flow and the network path. In particular, CNAF and CC-IN2P3 aim at being mirrors of each other, containing the full data set at any moment. The implementation for the data transfer from CC-IN2P3 to CNAF (via an @@ -144,9 +144,9 @@ for a while becuse of the lack of human resources. \section{Data size and CPU requirements} Calibration and reconstruction work in batches. The raw data related -to the batch are transferred to the centre that is in charge of the +to the batch are transferred to the center that is in charge of the processing before it starts. In addition, a rolling buffer of data is -stored at each computing centre, e.g.\ the last year of data taking. +stored at each computing center, e.g.\ the last year of data taking. Simulation has special needs because the input is negligible, but the computing power required is very large compared to the needs of @@ -211,7 +211,7 @@ resources at CNAF has been so far below the figures for a units are added in the following years. KM3NeT software that runs on the GRID can use CNAF computing nodes in opportunistic mode. -Already now, the data handling policy to safeguard the products of Tier-0 +Already now, the data handling policy to safeguard the products of Tier 0 is in place. 
Automatic synchronization from each shore station to both CC-IN2P3 and CNAF runs daily and provides two maximally separated paths from the data production site to final storage places. Mirroring @@ -219,11 +219,11 @@ and redundancy preservation between CC-IN2P3 and CNAF are foreseen and currently at an early stage. CNAF has already added relevant contributions to KM3NeT in terms of -know-how for IT solution deployment, e.g.~the above-mentioned synchronisation, software development solutions and the software-defined network at the Tier-0 at +know-how for IT solution deployment, e.g.~the above-mentioned synchronisation, software development solutions and the software-defined network at the Tier 0 at the Italian site. Setting up Software Defined Networks (SDN) for data acquisition deserves a special mention. The SDN technology\cite{SDN} is used to configure and operate the mission-critical fabric of switches/routers -that interconnects all the on-shore resources in Tier-0 stations. The +that interconnects all the on-shore resources in Tier 0 stations. The KM3NeT DAQ is built around switches compliant with the OpenFlow 1.3 protocol and managed by dedicated controller servers. With a limited number of Layer-2 forwarding rules, developed on purpose for the KM3NeT diff --git a/contributions/lhcb/lhcb.tex b/contributions/lhcb/lhcb.tex index 82d0e51782441e7e5e9ff3d4264430728df8a7a4..afa0071e066a9c88be99e017c8b3440cb266f44a 100644 --- a/contributions/lhcb/lhcb.tex +++ b/contributions/lhcb/lhcb.tex @@ -3,20 +3,17 @@ \begin{document} \title{LHCb Computing at CNAF} -\author{Stefano Perazzini} +\author{S. Perazzini$^1$, C. 
Bozzi$^{2,3}$} -\address{INFN Sezione di Bologna, viale Berti Pichat 6/2, 40127 Bologna (BO), Italy E-mail: Stefano.Perazzini@bo.infn.it} +\address{$^1$ INFN Sezione di Bologna, Bologna, IT} +\address{$^2$ CERN, Gen\`eve, CH} +\address{$^3$ INFN Sezione di Ferrara, Ferrara, IT} -\author{Concezio Bozzi} - -\address{CERN, EP/LBD, CH-1211 Geneve 23, Switzerland, and INFN Sezione di Ferrara, via Saragat 1, 44122 Ferrara, Italy E-mail: Concezio.Bozzi@fe.infn.it} - - -\ead{bozzi@fe.infn.it} +\ead{stefano.perazzini@bo.infn.it, concezio.bozzi@fe.infn.it} \begin{abstract} -In this document a summary of the LHCb computing activities during the 2018 is reported. The usage of the CPU, disk and tape resources spread among various computing centres is analysed, with particular attention to the performances of the INFN Tier 1 at CNAF. Projections of the necessary resources in the years to come are also briefly discussed. +In this document a summary of the LHCb computing activities during the 2018 is reported. The usage of the CPU, disk and tape resources spread among various computing centers is analysed, with particular attention to the performances of the INFN Tier 1 at CNAF. Projections of the necessary resources in the years to come are also briefly discussed. \end{abstract} \section{Introduction} @@ -44,7 +41,7 @@ The offline reconstruction of the FULL stream for proton collision data run from A full re-stripping of 2015, 2016 and 2017 proton collision data, started in autumn 2017, ended in April 2018. A stripping cycle of 2015 lead collision data was also performed in that period. The stripping cycle concurrent with the 2018 proton collision data taking started in June and run continuously until November. -The INFN Tier1 centre at CNAF was in downtime from November 2017, due to a major flood incident. 
However, the site was again fully available in March 2018, allowing the completion of the stripping cycles on hold, waiting for the data located at CNAF (about 20\% of the total). Despite the unavailability of CNAF resources for the first months of 2018 the site performed excellently for the rest of the year, as testified by the number reported in this report. +The INFN Tier 1 center at CNAF was in downtime from November 2017, due to a major flood incident. However, the site was again fully available in March 2018, allowing the completion of the stripping cycles on hold, waiting for the data located at CNAF (about 20\% of the total). Despite the unavailability of CNAF resources for the first months of 2018 the site performed excellently for the rest of the year, as testified by the numbers reported in this report. As in previous years, LHCb continued to make use of opportunistic resources, that are not pledged to WLCG, but that significantly contributed to the overall usage. @@ -67,27 +64,27 @@ Total WLCG & 502 & 41.3 & 90.5\\ \hline \label{tab:pledges} \end{table} -The usage of WLCG CPU resources by LHCb is obtained from the different views provided by the EGI Accounting portal. The CPU usage is presented in Figure~\ref{fig:T0T1} for the Tier0 and Tier1s and in Figure~\ref{fig:T2} for Tier2s . The same data is presented in tabular form in Table~\ref{tab:T0T1} and Table~\ref{tab:T2}, respectively. +The usage of WLCG CPU resources by LHCb is obtained from the different views provided by the EGI Accounting portal. The CPU usage is presented in Figure~\ref{fig:T0T1} for the Tier 0 and Tier 1 sites and in Figure~\ref{fig:T2} for Tier 2 sites. The same data is presented in tabular form in Table~\ref{tab:T0T1} and Table~\ref{tab:T2}, respectively.
\begin{figure} \begin{center} \includegraphics[width=0.8\textwidth]{T0T1.png} \end{center} -\caption{\label{fig:T0T1}Monthly CPU work provided by the Tier-0 and - Tier 1 centres to LHCb during 2018.} +\caption{\label{fig:T0T1}Monthly CPU work provided by the Tier 0 and + Tier 1 centers to LHCb during 2018.} \end{figure} \begin{figure} \begin{center} \includegraphics[width=0.8\textwidth]{T2.png} \end{center} -\caption{\label{fig:T2}Monthly CPU work provided by the Tier-2 centres to LHCb during 2018.} +\caption{\label{fig:T2}Monthly CPU work provided by the Tier 2 centers to LHCb during 2018.} \end{figure} \begin{table}[htbp] - \caption{Average CPU power provided by the Tier-0 and the Tier 1 - centres to LHCb during 2018.} + \caption{Average CPU power provided by the Tier 0 and the Tier 1 + centers to LHCb during 2018.} \centering \begin{tabular}{lcc} \hline @@ -110,8 +107,8 @@ UK-T1-RAL & 71.7 & 74.8 \\ \end{table} \begin{table}[htbp] - \caption{Average CPU power provided by the Tier-2 - centres to LHCb during 2018.} + \caption{Average CPU power provided by the Tier 2 + centers to LHCb during 2018.} \centering \begin{tabular}{lcc} \hline @@ -137,21 +134,21 @@ UK & 85.7 & 29.3 \\ \label{tab:T2} \end{table} -The average power used at Tier0+Tier1s sites is about 32\% higher than the pledges. The average power used at Tier2s is about 26\% higher than the pledges. +The average power used at Tier 0 + Tier 1 sites is about 32\% higher than the pledges. The average power used at Tier 2 sites is about 26\% higher than the pledges. -The average CPU power accounted for by WLCG (including Tier0/1 + Tier2) amounts to 654 kHS06, to be compared to 502 kHS06 estimated needs quoted in Table~\ref{tab:pledges}. The Tier0 and Tier1s usage is generally higher than the pledges. The LHCb computing model is flexible enough to use computing resources for all production workflows wherever available. 
It is important to note that this is true also for CNAF, despite it started to contribute to the computing activities only in March, after the recovery from the incident. After that the CNAF Tier1 has offered great stability, leading to maximal efficiency in the overall exploitation of the resources. The total amount of CPU used at Tier0 and Tier1s centres is detailed in Figure~\ref{fig:T0T1_MC}, showing that about 76\% of the CPU work is due to Monte Carlo simulation. From the same plot it is visible the start of a stripping campaign in March. This corresponds to the recovery of the backlog in the restripping of the Run2 data collected in 2015-2017, due to the unavailability of CNAF after the incident of November 2017. As it is visible from the plot, the backlog has been recovered by the end of April 2018, before the restart of data-taking operations. Even if all the other Tier1s contributed to reprocess these data, the recall of them from tape has been done exclusively at CNAF. Approximately 580 TB of data have been recalled from tape in about 6 weeks, with a maximum throughput of about 250 MB/s. +The average CPU power accounted for by WLCG (including Tier 0/1 + Tier 2) amounts to 654 kHS06, to be compared to 502 kHS06 estimated needs quoted in Table~\ref{tab:pledges}. The Tier 0 and Tier 1s usage is generally higher than the pledges. The LHCb computing model is flexible enough to use computing resources for all production workflows wherever available. It is important to note that this is true also for CNAF, despite it started to contribute to the computing activities only in March, after the recovery from the incident. After that the CNAF Tier 1 has offered great stability, leading to maximal efficiency in the overall exploitation of the resources. The total amount of CPU used at Tier 0 and Tier 1s centers is detailed in Figure~\ref{fig:T0T1_MC}, showing that about 76\% of the CPU work is due to Monte Carlo simulation. 
From the same plot it is visible the start of a stripping campaign in March. This corresponds to the recovery of the backlog in the restripping of the Run2 data collected in 2015-2017, due to the unavailability of CNAF after the incident of November 2017. As it is visible from the plot, the backlog has been recovered by the end of April 2018, before the restart of data-taking operations. Even if all the other Tier 1 centers contributed to reprocess these data, the recall of them from tape has been done exclusively at CNAF. Approximately 580 TB of data have been recalled from tape in about 6 weeks, with a maximum throughput of about 250 MB/s. \begin{figure} \begin{center} \includegraphics[width=0.8\textwidth]{T0T1_MC.png} \end{center} -\caption{\label{fig:T0T1_MC}Usage of LHCb resources at Tier0 and Tier1s during 2018. The plot shows the normalized CPU usage (kHS06) for the various activities.} +\caption{\label{fig:T0T1_MC}Usage of LHCb resources at Tier 0 and Tier 1 sites during 2018. The plot shows the normalized CPU usage (kHS06) for the various activities.} \end{figure} Since the start of data taking in May 2018, tape storage grew by about 16.7 PB. Of these, 9.5 PB were due to new collected RAW data. The rest was due to RDST (2.6 PB) and ARCHIVE (4.6 PB), the latter due to the archival of Monte Carlo productions, re-stripping of former real data, and new Run2 data. The total tape occupancy as of December 31st 2018 is 68.9 PB, 38.4 PB of which are used for RAW data, 13.3 PB for RDST, 17.2 PB for archived data. This is 12.9\% lower than the original request of 79.2 PB. The total tape occupancy at CNAF at the end of 2018 was about 9.3 PB, of which 3.3 PB of RAW data, 3.6 PB of ARCHIVE and 2.4 of RDST. This correspond to an increase of about 2.3 PB with respect to the end of 2017. These numbers are in agreement with the share of resources expected from CNAF. 
\begin{table}[htbp] - \caption{Disk Storage resource usage as of February 11$^{\rm th}$ 2019 for the Tier0 and Tier1s centres. The top row is taken from the LHCb accounting, the other ones (used, available and installed capacity) are taken from the recently commissioned WLCG Storage Space Accounting tool. The 2018 pledges are shown in the last row.} + \caption{Disk Storage resource usage as of February 11$^{\rm th}$ 2019 for the Tier 0 and Tier 1 centers. The top row is taken from the LHCb accounting, the other ones (used, available and installed capacity) are taken from the recently commissioned WLCG Storage Space Accounting tool. The 2018 pledges are shown in the last row.} \begin{center} \resizebox{\columnwidth}{!}{ \begin{tabular}{|l|cc|ccccccc|} @@ -173,14 +170,14 @@ Pledge '18 & 11.4 & 26.25 & 5.61 & 4.01 & 3.20 & 1.43 & 7.32 \end{center} \end{table} -Table~\ref{tab:disk} shows the situation of disk storage resources at CERN and Tier1s, as well as at each Tier1 site, as of February 11$^{\rm th}$ 2019. The used space includes derived data, i.e. DST and micro-DST of both real and simulated data, and space reserved for users. The latter accounts for 1.2 PB in total, 0.9 of which are used. The SRR disk used and SRR disk free information concerns only permanent disk storage (previously known as “T0D1â€). The first two lines show a good agreement between what the site reports and what the LHCb accounting (first line) reports. The sum of the Tier0 and Tier1s 2018 pledges amount to 37.7 PB. The available disk space is 35 PB in total, 26 PB of which are used to store real and simulated datasets, and user data. A total of 3.7PB is used as tape buffer, the remaining 5 PB are free and will be used to store the output of the legacy stripping campaigns of Run1 and Run2 data that are currently being prepared. The disk space available at CNAF is about 6.6 PB, about 18\% above the pledge. 
+Table~\ref{tab:disk} shows the situation of disk storage resources at CERN and Tier 1 sites, as well as at each Tier 1 site, as of February 11$^{\rm th}$ 2019. The used space includes derived data, i.e. DST and micro-DST of both real and simulated data, and space reserved for users. The latter accounts for 1.2 PB in total, 0.9 of which are used. The SRR disk used and SRR disk free information concerns only permanent disk storage (previously known as ``T0D1''). The first two lines show a good agreement between what the site reports and what the LHCb accounting (first line) reports. The sum of the Tier 0 and Tier 1 sites 2018 pledges amounts to 37.7 PB. The available disk space is 35 PB in total, 26 PB of which are used to store real and simulated datasets, and user data. A total of 3.7 PB is used as tape buffer, the remaining 5 PB are free and will be used to store the output of the legacy stripping campaigns of Run1 and Run2 data that are currently being prepared. The disk space available at CNAF is about 6.6 PB, about 18\% above the pledge. In summary, the usage of computing resources in the 2018 calendar year has been quite smooth for LHCb. Simulation is the dominant activity in terms of CPU work. Additional unpledged resources, as well as clouds, on-demand and volunteer computing resources, were also successfully used. They were essential -in providing CPU work during the outage of the CNAF Tier 1 centre. As for the INFN Tier1 at CNAF, it came back to its fully-operational status in March 2018. After that, the backlog in the restripping campaign due to unavailability of data stored at CNAF was recovered, thanks also to the contribution of other sites, in time for the restart of data taking. After March 2018, CNAF operated in a very efficient and reliable way, being even able to over perform in terms of CPU power with respect to the pledged resources. +in providing CPU work during the outage of the CNAF Tier 1 center.
As for the INFN Tier 1 at CNAF, it came back to its fully-operational status in March 2018. After that, the backlog in the restripping campaign due to unavailability of data stored at CNAF was recovered, thanks also to the contribution of other sites, in time for the restart of data taking. After March 2018, CNAF operated in a very efficient and reliable way, being even able to over perform in terms of CPU power with respect to the pledged resources. \section{Expected growth of resources in 2020-2021} -In terms of CPU requirements, the different activities result in CPU work estimates for 2020-2021, that are apportioned between the different Tiers taking into account the computing model constraints and also capacities that are already installed. This results in the requests shown in Table~\ref{tab:req_CPU} together with the pledged resources for 2019. The CPU work required at CNAF would correspond to about 18\% of the total CPU requested at Tier1s+Tier2s sites. +In terms of CPU requirements, the different activities result in CPU work estimates for 2020-2021, that are apportioned between the different Tiers taking into account the computing model constraints and also capacities that are already installed. This results in the requests shown in Table~\ref{tab:req_CPU} together with the pledged resources for 2019. The CPU work required at CNAF would correspond to about 18\% of the total CPU requested at Tier 1s+Tier 2s sites. \begin{table}[htbp] \centering \caption{CPU power requested at the different Tiers in 2020-2021. Pledged resources for 2019 are also reported} @@ -198,7 +195,7 @@ In terms of CPU requirements, the different activities result in CPU work estima \end{tabular} \end{table} -The forecast total disk and tape space usage at the end of the years 2019-2020 are broken down into fractions to be provided by the different Tiers. These numbers are shown in Table~\ref{tab:req_disk} for disk and Table~\ref{tab:req_tape} for tape. 
The disk resources required at CNAF would be about 18\% of those requested for Tier1s+Tier2s sites, while for tape storage CNAF is supposed to provide about 24\% of the total tape request to Tier1s sites. +The forecast total disk and tape space usage at the end of the years 2019-2020 are broken down into fractions to be provided by the different Tiers. These numbers are shown in Table~\ref{tab:req_disk} for disk and Table~\ref{tab:req_tape} for tape. The disk resources required at CNAF would be about 18\% of those requested for Tier 1 sites + Tier 2 sites, while for tape storage CNAF is supposed to provide about 24\% of the total tape request to Tier 1 sites. \begin{table}[htbp] \centering @@ -208,9 +205,9 @@ The forecast total disk and tape space usage at the end of the years 2019-2020 a \hline Disk (PB) & 2019 & 2020 & 2021 \\ \hline - Tier0 & 13.4 & 17.2 & 19.5 \\ + Tier 0 & 13.4 & 17.2 & 19.5 \\ Tier 1 & 29.0 & 33.2 & 39.0 \\ - Tier2 & 4 & 7.2 & 7.5 \\ + Tier 2 & 4 & 7.2 & 7.5 \\ \hline Total & 46.4 & 57.6 & 66.0 \\ \hline @@ -225,7 +222,7 @@ The forecast total disk and tape space usage at the end of the years 2019-2020 a \hline Tape (PB) & 2019 & 2020 & 2021 \\ \hline - Tier0 & 35.0 & 36.1 & 52.0 \\ + Tier 0 & 35.0 & 36.1 & 52.0 \\ Tier 1 & 53.1 & 55.5 & 90.0 \\ \hline Total & 88.1 & 91.6 & 142.0 \\ @@ -235,6 +232,6 @@ The forecast total disk and tape space usage at the end of the years 2019-2020 a \section{Conclusion} -A description of the LHCb computing activities during 2018 has been given, with particular emphasis on the usage of resources and on the forecasts of resource needs until 2021. As in previous years, the CNAF Tier1 centre gave a substantial contribution to LHCb computing in terms of CPU work and storage made available to the collaboration. This achievement is particularly important this year, as CNAF was recovering from the major incident of November 2017 that unfortunately interrupted its activities. 
The effects of CNAF unavailability have been overcome also thanks to extra efforts from other sites and to the opportunistic usage of non-WLCG resources. The main consequence of the incident, in terms of LHCb operations, has been the delay in the restripping campaign of data collected during 2015-2017. The data that were stored at CNAF (approximately 20\% of the total) have been processed when the site restarted the operations in March 2018. It is worth to mention that despite the delay, the restripping campaign has been completed before the start of data taking according to the predicted schedule, avoiding further stress to the LHCb computing operations. Emphasis should be put also on the fact that an almost negligible amount of data have been lost in the incident and in any case it has been possible to recover them from backup copies stored at other sites. +A description of the LHCb computing activities during 2018 has been given, with particular emphasis on the usage of resources and on the forecasts of resource needs until 2021. As in previous years, the CNAF Tier 1 center gave a substantial contribution to LHCb computing in terms of CPU work and storage made available to the collaboration. This achievement is particularly important this year, as CNAF was recovering from the major incident of November 2017 that unfortunately interrupted its activities. The effects of CNAF unavailability have been overcome also thanks to extra efforts from other sites and to the opportunistic usage of non-WLCG resources. The main consequence of the incident, in terms of LHCb operations, has been the delay in the restripping campaign of data collected during 2015-2017. The data that were stored at CNAF (approximately 20\% of the total) have been processed when the site restarted the operations in March 2018. 
It is worth to mention that despite the delay, the restripping campaign has been completed before the start of data taking according to the predicted schedule, avoiding further stress to the LHCb computing operations. Emphasis should be put also on the fact that an almost negligible amount of data have been lost in the incident and in any case it has been possible to recover them from backup copies stored at other sites. \end{document} diff --git a/contributions/lhcf/lhcf.tex b/contributions/lhcf/lhcf.tex index 8c2b03ae470761c7f951cb32680c6c9db9af8d3f..1ed9775270b46075d185233d7e5f0164b951a171 100644 --- a/contributions/lhcf/lhcf.tex +++ b/contributions/lhcf/lhcf.tex @@ -3,12 +3,12 @@ \begin{document} \title{The LHCf experiment} -\author{A Tiberio$^{2,1}$, O Adriani$^{2,1}$, E Berti $^{2,1}$, L Bonechi$^{1}$, M Bongi$^{2,1}$, R D'Alessandro$^{2,1}$, S Ricciarini$^{1,3}$, and A Tricomi$^{4,5}$ for the LHCf Collaboration} -\address{$^1$ INFN, Section of Florence, I-50019 Sesto Fiorentino, Florence, Italy} -\address{$^2$ Department of Physics, University of Florence, I-50019 Sesto Fiorentino, Florence, Italy} -\address{$^3$ IFAC-CNR, I-50019 Sesto Fiorentino, Florence, Italy} -\address{$^4$ INFN, Section of Catania, I-95131 Catania, Italy} -\address{$^5$ Department of Physics, University of Catania, I-95131 Catania, Italy} +\author{A. Tiberio$^{2,1}$, O. Adriani$^{2,1}$, E. Berti $^{2,1}$, L. Bonechi$^{1}$, M. Bongi$^{2,1}$, R. D'Alessandro$^{2,1}$, S. Ricciarini$^{1,3}$, A. 
Tricomi$^{4,5}$ for the LHCf Collaboration} +\address{$^1$ INFN Section of Florence, Sesto Fiorentino (FI), IT} +\address{$^2$ University of Florence, Sesto Fiorentino (FI), IT} +\address{$^3$ IFAC-CNR, Sesto Fiorentino (FI), IT} +\address{$^4$ INFN Section of Catania, Catania, IT} +\address{$^5$ University of Catania, Catania, IT} \ead{alessio.tiberio@fi.infn.it} diff --git a/contributions/limadou/limadou.tex b/contributions/limadou/limadou.tex index 28ec943bc65ef365d4144b66a475eb8a551fc065..c8960b47c4c146ad05cee78131f9207c39af9f11 100644 --- a/contributions/limadou/limadou.tex +++ b/contributions/limadou/limadou.tex @@ -3,9 +3,9 @@ \begin{document} \title{CSES-Limadou at CNAF} -\author{Matteo Merg\'e} +\author{Matteo Merg\'e$^1$} -\address{Agenzia Spaziale Italiana, Space Science Data Center ASI-SSDC \newline via del politecnico 1, 00133, Rome, Italy } +\address{$^1$ Agenzia Spaziale Italiana, Space Science Data Center ASI-SSDC, Rome, IT} \ead{matteo.merge@roma2.infn.it, matteo.merge@ssdc.asi.it} diff --git a/contributions/net/main.tex b/contributions/net/main.tex index f2e439cbeff94292fdbe7beace82bb346bf3c38f..bc38da1b2ab5ab1a90f2cb79ce262d371d6313b8 100644 --- a/contributions/net/main.tex +++ b/contributions/net/main.tex @@ -1,8 +1,7 @@ - \documentclass[a4paper]{jpconf} \usepackage{graphicx} \begin{document} -\title{The INFN-Tier1: Network and Security} +\title{The INFN-Tier 1: Network and Security} \author{S.~Zani$^1$, D.~De~Girolamo$^1$, L.~Chiarelli$^{1,2}$, V.~Ciaschini$^1$} \address{$^1$ INFN-CNAF, Bologna, IT} @@ -22,13 +21,13 @@ The Network unit manages the wide area and local area connections of CNAF, it is \section{Wide Area Network} -Inside CNAF datacentre is hosted the main PoP of GARR network, based on a fully managed dark fiber infrastructure. +Inside CNAF data center is hosted the main PoP of GARR network, based on a fully managed dark fiber infrastructure. 
CNAF is connected to the WAN via GARR/GEANT essentially with two physical links: \begin{itemize} \item General Internet: General IP link is $20 Gbps$ (2x10 Gbps) via GARR and GEANT - \item LHCOPN/LHCONE: The link to WLCG destinations is $200Gbps$ (2x100 Gbps) link shared between the LHC-OPN network for traffic with the Tier-0 (CERN) and the other Tier-1s and LHCONE network mainly for traffic with the Tier-2s. Since Summer 2018, the LHCOPN dedicated link to CERN (from Milan GARR POP) has been upgraded to 2x100 Gbps while the peering to LHCONE is at $100Gbps$ (from Milan GARR POP and GEANT GARR POP). + \item LHCOPN/LHCONE: The link to WLCG destinations is $200Gbps$ (2x100 Gbps) link shared between the LHC-OPN network for traffic with the Tier 0 (CERN) and the other Tier 1 sites and LHCONE network mainly for traffic with the Tier 2 centers. Since Summer 2018, the LHCOPN dedicated link to CERN (from Milan GARR POP) has been upgraded to 2x100 Gbps while the peering to LHCONE is at $100Gbps$ (from Milan GARR POP and GEANT GARR POP). \end{itemize} @@ -62,7 +61,7 @@ As shown in the figures~\ref{lhc-opn-usage} and \ref{gpn-usage}, the network usa \end{figure} -Currently the dedicated bandwidth for LHCOPN to CERN is 100Gbps with a backup link of 4x10Gbps. During 2019 the configuration will change and there will be provided 2x100 Gb/s links to the two CERN POP granting a better resiliency and giving potentially 200 Gbpss full speed with CERN and the Tier-1s. +Currently the dedicated bandwidth for LHCOPN to CERN is 100Gbps with a backup link of 4x10Gbps. During 2019 the configuration will change and there will be provided 2x100 Gb/s links to the two CERN POP granting a better resiliency and giving potentially 200 Gbps full speed with CERN and the Tier 1s.
\section{Data Center Interconnect with CINECA} @@ -76,7 +75,7 @@ The Cloud Express 2 are Transponders with 12 x 100 Gigabit Ethernet interfaces o The latency introduced by each CX1200 is of $\sim 5 \mu$s and the total RTT (Round Trip Time) between servers at CNAF and servers at CINECA is of 0,48 ms comparable to what we observe on the LAN (0,28 ms). -All worker nodes on the network segment at CINECA have IP addresses of the INFN Tier-1 network and are used as they were installed at the Tier-1 facility (see fig.~\ref{cineca-schema}). The data access bandwidth is 400 Gbps but can scale up to 1,2 Tbps. +All worker nodes on the network segment at CINECA have IP addresses of the INFN Tier 1 network and are used as they were installed at the Tier 1 facility (see fig.~\ref{cineca-schema}). The data access bandwidth is 400 Gbps but can scale up to 1,2 Tbps. This DCI interconnection has been implemented rapidly and as a proof of concept (this is the first time this technology has been used in Italy), now it is in production and as it is becoming a stable and relevant asset for CNAF (fig.~\ref{cineca-traffic}), it is in our plan to have a second optical fiber (between CNAF and CINECA) for resiliency reasons. 
@@ -85,7 +84,7 @@ This DCI interconnection has been implemented rapidly and as a proof of concept \begin{figure}[h] \begin{center} \includegraphics[width=30pc]{cineca-schema.png}\hspace{2pc}% - \caption{\label{cineca-schema}INFN Tier-1 – CINECA Data Center Interconnection.} + \caption{\label{cineca-schema}INFN Tier 1 – CINECA Data Center Interconnection.} \end{center} \end{figure} @@ -94,7 +93,7 @@ This DCI interconnection has been implemented rapidly and as a proof of concept \begin{figure}[h] \begin{center} \includegraphics[width=30pc]{cineca.png}\hspace{2pc}% - \caption{\label{cineca-traffic}INFN Tier-1 – CINECA link usage} + \caption{\label{cineca-traffic}INFN Tier 1 – CINECA link usage} \end{center} \end{figure} @@ -118,7 +117,7 @@ CNAF has had an important role in determining how the whole of INFN would implem \subsection{Vulnerability scanning} -In an effort to monitor the security of the centre, CNAF has started a campaign of systematic and periodic scanning all of its machines, personal and not, looking for vulnerabilities in an effort to find and fixing them before they could be actively exploited by an attacker. +In an effort to monitor the security of the center, CNAF has started a campaign of systematic and periodic scanning all of its machines, personal and not, looking for vulnerabilities in an effort to find and fixing them before they could be actively exploited by an attacker. As expected, this scanning brought to light a number of issues that were promptly corrected (when possible) or mitigated (when not) thus nipping a number of potential problems in the bud. 
diff --git a/contributions/newchim/repnewchim18.tex b/contributions/newchim/repnewchim18.tex index 4855302d363c4bb6e4d6d61e371b9a01cdd0f1c8..9bdf4b78a2d5ba37e0cda4088129e37e68310d79 100644 --- a/contributions/newchim/repnewchim18.tex +++ b/contributions/newchim/repnewchim18.tex @@ -12,12 +12,12 @@ M.~Papa$^1$, S.~Pirrone$^{1}$, G.~Politi$^{2,1}$, F.~Rizzo$^{2,3}$, P.~Russotto$^{3}$, A.~Trifir\`o$^{5,1}$, M~Trimarchi$^{5,1}$ } -\address{$^1$ INFN, Sezione di Catania, Italy} -\address{$^2$ Dip. di Fisica e Astronomia, Universit\`a di Catania, Italy} -\address{$^3$ INFN, Laboratori Nazionali del Sud, Catania, Italy} -\address{$^4$ CSFNSM, Catania, Italy} -\address{$^5$ Dipartimento di Scienze MITF, Universit\`a di Messina, Italy} -\address{$^6$ Universit\`a di Enna, ``Kore'', Italy} +\address{$^1$ INFN Sezione di Catania, Catania, IT} +\address{$^2$ Universit\`a di Catania, Catania, IT} +\address{$^3$ INFN Laboratori Nazionali del Sud, Catania, IT} +\address{$^4$ CSFNSM, Catania, IT} +\address{$^5$ Universit\`a di Messina, Messina, IT} +\address{$^6$ Universit\`a di Enna, Enna, IT} \ead{defilippo@ct.infn.it} @@ -49,7 +49,7 @@ The total number of GET channels for the CHIMERA + FARCOS (20 telescopes) device \section{CNAF support for Newchim} In the new digital data acquisition we store the sampled signals, thus producing a huge set of raw data. The data rate can be evaluated at 3-5 TB/day in a experiment (without FARCOS). For example the last CHIMERA experiment in 2018 collected a total of 70 TB of data in two weeks of beam time. -Clearly this easily saturates our local disk servers storage capabilities. We use the CNAF as main backup storage center: after data merging and processing, the raw data (signals) are reduced to physical variables in ROOT format, while the original raw data are copied and stored at CNAF. Copy is done in the {\it /storage/gpfs...} storage area in the general purpose tier1-UI machines by using the Tier-1 infrastructure and middleware software. 
In the future could be interesting to use also the CPU resources at CNAF in order to run the data merger and signal processing software directly on the copied data. Indeed we expect a significative increase of the storage resources needed when the FARCOS array will be fully operational. +Clearly this easily saturates our local disk servers storage capabilities. We use the CNAF as main backup storage center: after data merging and processing, the raw data (signals) are reduced to physical variables in ROOT format, while the original raw data are copied and stored at CNAF. Copy is done in the {\it /storage/gpfs...} storage area in the general purpose tier 1-UI machines by using the Tier 1 infrastructure and middleware software. In the future could be interesting to use also the CPU resources at CNAF in order to run the data merger and signal processing software directly on the copied data. Indeed we expect a significative increase of the storage resources needed when the FARCOS array will be fully operational. \section*{References} @@ -64,4 +64,4 @@ Clearly this easily saturates our local disk servers storage capabilities. We us 012003 \bibitem{cas18} A. Castoldi, C. Guazzoni, T. Parsani, 2018 {\it Nuovo Cimento C} {\bf 41} 168 \end{thebibliography} -\end{document} \ No newline at end of file +\end{document} diff --git a/contributions/pett/pett.tex b/contributions/pett/pett.tex index ee19cf8098d3cd11c543363916a8f319bd105dd5..f367e970bac201e670288231b605007e9fd59d30 100644 --- a/contributions/pett/pett.tex +++ b/contributions/pett/pett.tex @@ -12,14 +12,14 @@ \begin{document} \title{External Projects and Technology Transfer} -\author{Cristina Vistoli, Barbara Martelli} -\address{INFN CNAF, viale Berti Pichat 6/2 40127 Bologna, Italy} +\author{C. Vistoli$^1$, B. 
Martelli$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{barbara.martelli@cnaf.infn.it} \begin{abstract} -The main mission of the External Projects and Technology Transfer Unit (PETT) is the coordination of CNAF activities funded by external organization (Region, Italian Ministry of Education, EU) and CNAF Technology Transfer actions. PETT Unit coordinates the INFN Technology Transfer Laboratory in Emilia Romagna (TTLab), accredited to the Emilia Romagna High Technology Network (HTN) since 2015. +The main mission of the External Projects and Technology Transfer Unit (PETT) is the coordination of CNAF activities funded by external organizations (Region, Italian Ministry of Education, EU) and CNAF Technology Transfer actions. The PETT Unit coordinates the INFN Technology Transfer Laboratory in Emilia Romagna (TTLab), accredited to the Emilia Romagna High Technology Network (HTN) since 2015. In 2018 TTLab submitted 4 proposals to the POR-FESR Emilia Romagna call, and at the beginning of 2019 three of them were approved and funded: FORTRESS, WE-LIGHT and SmartChain. In the meantime, the TROPIC project, approved in the former POR-FESR call, continued to run smoothly and the Harmony project started to move from a Proof of Concept phase to the production one. In its first year of life, the ISO 27001 Information Security Management System (ISMS) had to manage the critical situation originated by the datacenter flood happened at the end of 2017 and it did it successfully, passing the ISO 27001 external audit without any non-conformity. @@ -30,65 +30,134 @@ During 2018 the External Projects and Technology Transfer (PETT) Organizational Unit has contributed to various projects in the field of computing, communication of science, technology transfer and education.
Some of the most relevant ones are: FiloBlu (POR-FESR Regione -Marche), Opus Facere (MIUR), Emilia Romagna Plan for high -competencies in Research Technology Transfer and Entrepreneurship, OPEN-NEXT and TROPIC(POR-FESR -2014-2020), Harmony. Great effort has been put on the -start up phase of the new Technology Transfer Laboratory which put together heterogeneous +Marche), Opus Facere (MIUR) \cite{opusfacere}, Emilia Romagna Plan for high +competencies in Research Technology Transfer and Entrepreneurship \cite{altecompetenze}, OPEN-NEXT and TROPIC(POR-FESR +2014-2020), Harmony \cite{harmony}. Great effort has been dedicated to the +consolidation of the Technology Transfer Laboratory (INFN-TTLab) \cite{ttlab} which puts together heterogeneous competencies (physics, computing, mechanics and electronics) from Emilia Romagna INFN Sections and Centers (Bologna, Ferrara and CNAF) in order to promote the transfer of INFN -know-how toward regional enterprises. In 2017 we concluded an activity finalized to -the ISO-27001 certification of a subset of the INFN Tier1 resources. This is required in order +know-how toward regional enterprises. In 2018 we operated the first year of life of the ISO-27001 ISMS consisting of a subset of INFN Tier 1 resources. This was required in order to store and manage private and sensitive personal data and could open new opportunities of -exploitation of the Tier1 resources in the near future. +exploitation of the Tier 1 resources in the near future. \section{The TROPIC project} -TTLab is coordinating the TROPIC project (Target for Radioisotope Production via anti-Channelling) in a consortium with COMECER and Biomeccanica Srl. +TTLab is coordinating the TROPIC project (Target for Radioisotope Production via anti-Channelling) \cite{tropic} in a consortium with COMECER and Biomeccanica Srl. The project is part of the European Regional Development Plan POR-FESR Axis 1, Research and Innovation 2014-2020 of the Emilia-Romagna region. 
Axis 1 aims at strengthening the regional network of research and technology transfer to companies with the purpose of increasing the ability to bring innovative solutions and products to the market. Through collaborations with researchers, it promotes innovation paths in the strategic areas of the regional production system and strengthens the high-tech network. The TROPIC project intends to explore a new radioisotope production method through the irradiation of solid targets in which a quantum effect called anti-channelling is exploited. Thanks to this effect, the probability of impact of the particle emitted by the accelerator on a crystalline matrix is much higher than the same dynamic but with a traditional amorphous solid target. This leads to an increase in the yield of nuclear bombardment and thus produces the desired isotope in larger quantities. The project intends to evaluate how much it is possible to save in terms of cost of the enriched material and how much the reaction yield grows in this particular configuration of the target. The ultimate goal is to make the production of these isotopes, extremely interesting from a diagnostic and therapeutic point of view, easier and less expensive. -The theoretical research activity has already produced various results published in the past years. The next step will be the experimental test and the realisation of the first prototype. - -\section{Summer Students and CNAF Log Analysis} -.... +The theoretical research activity has already produced various results published in the past years. The next step will be the experimental test and the realization of the first prototype. 
\section{The HARMONY Alliance} -The HARMONY project (Healthcare alliance for resourceful medicines offensive against neoplasms in hematology) \cite{harmony} is part of IMI’s Big Data for Better Outcomes programme, which aims to facilitate the use of diverse data sources to deliver results that reflect health outcomes of treatments that are meaningful for patients, clinicians, regulators, researchers, healthcare decision-makers, and others. +The HARMONY project (Healthcare alliance for resourceful medicines offensive against neoplasms in hematology) \cite{harmony} is part of IMI2 Big Data for Better Outcomes programme, which aims to facilitate the use of diverse data sources to deliver results that reflect health outcomes of treatments that are meaningful for patients, clinicians, regulators, researchers, healthcare decision-makers, and others. -Blood cancers, or haematologic cancers (e.g. leukaemia, lymphoma and myeloma), affect the production and function of blood cells and account for about one third of cancer cases in children and about one third of cancer deaths. As many blood cancers are rare, and healthcare practice varies across EU, a lack of data on relevant outcomes represents a challenge for clinicians, researchers, and decision-makers alike. The HARMONY project aims to use big data to deliver information that will help to improve the care of patients with these diseases. Specifically, the project will gather together, integrate and analyse anonymous patient data from a number of high quality sources. This will help the team to define clinical endpoints and outcomes for these diseases that are recognised by all key stakeholders. Meanwhile the project’s data sharing platform will facilitate and improve decision making for policy makers and clinicians alike to help them to give the right treatment to the right patient at the right time. More broadly, the project will result in a pan-European network of stakeholders with expertise in this disease area. 
-TTLab is involved as Linked Third Party of University of Bologna and is in charge of providing and managing the Harmony Big Data Platform Hosting. +Blood cancers, or haematologic cancers (e.g. leukaemia, lymphoma and myeloma), affect the production and function of blood cells and account for about one third of cancer cases in children and about one third of cancer deaths. As many blood cancers are rare, and healthcare practice varies across EU, a lack of data on relevant outcomes represents a challenge for clinicians, researchers, and decision-makers alike. The HARMONY project aims to use big data to deliver information that will help to improve the care of patients with these diseases. Specifically, the project will gather together, integrate and analyze anonymous patient data from a number of high quality sources. This will help the team to define clinical endpoints and outcomes for these diseases that are recognized by all key stakeholders. Meanwhile the project data sharing platform will facilitate and improve decision making for policy makers and clinicians alike to help them to give the right treatment to the right patient at the right time. More broadly, the project will result in a pan-European network of stakeholders with expertise in this disease area. +TTLab is involved as Linked Third Party of University of Bologna and is in charge of providing and managing the Harmony Big Data Platform Hosting in compliance with the ISO/IEC-27001 certification. -\section{Other regional activities} +\section{Other regional and national activities} In 2018, as industrial research lab of the Emilia Romagna High Technology Network \cite{htn}, TTLab carried out a number of activities in order to strengthen the link between research and industry sector. 
-In 2018 TTLab kept on contributing in the following Emilia Romagna Clust-ERs: +In 2018 TTLab kept on contributing in the following Emilia Romagna Clust-ERs \cite{clusters}: \begin{itemize} \item[--]INNOVATE (ICT): focused on the role of digital technologies as a means for innovate services in a global context and to emphasize their transformative power of the economy and society. \item[--]AGRIFOOD: covers the whole “from farm to fork†value chain, starting from the farmed produce all the way to the consumers’ plates, it includes ICT systems, equipment and machineries, transformation and packaging plants, logistics and food by-products and waste valorization. \item[--]CREATE: aims to improve the innovation in the culture and creative industries sector \item[--]HEALTH: focused on health related topics like biomed, pharmaceutical and omics sciences, smart and active living. In Emilia-Romagna there is the most important medtech district in Europe and regional policies encourage local research actors to team-up with private companies in order to maximize innovation. -\item[--]MECH: focused on the mechatronics and motor sector. A number of worldwide famous brands both in the automotive and in the mechanical sector are located in the region. These companies are at the cutting edge of a corporate system and can take advantage of technologies developed by our research teams. +\item[--]MECH: focused on the mechatro +nics and motor sector. A number of worldwide famous brands both in the automotive and in the mechanical sector are located in the region. These companies are at the cutting edge of a corporate system and can take advantage of technologies developed by our research teams. \item[--] BUILD: supports the innovation system in the building and construction field. \end{itemize} -Clust-ERs are recognised Associations, formed in accordance with articles 14-42 of the Italian Civil Code.
Clust-ER Associations are communities of public and private bodies (research centres, businesses, training bodies) that share ideas, skills, tools, and resources to support the competitiveness of the most important production systems in Emilia-Romagna. In these Clust-ERs, research laboratories and centres for innovation belonging to the High Technology Network team up with the business system and the higher education system to make up the inter-disciplinary critical mass necessary to multiply opportunities and develop strategic projects with a high regional impact. Main objectives of Clust-ERs are: to maximise the opportunities for participating in European programmes and international research and innovation networks, to forge synergies and set up coordinated and stable networks and connections with other public/private agglomerations operating in the same sectors at national and European level, to encourage and support the development and creation of initiatives in higher education and the development of human resources and to support the development of new research infrastructures. -TTLab is part of the National Cluster ``Intelligent Factories'': an association that includes large and medium-small companies, universities and research centres, company associations and other stakeholders active in the advanced manufacturing sector. The association is recognized by MIUR as a driver of sustainable economic growth in all the regions of the national economic system, as it fosters the innovation and specialization of Italian manufacturing systems. 
The mission of CFI is to increase the competitiveness of the italian maufacturing industry through the design and implementation of a number of research projects for the development of new enabling technologies; to maintain and develop advanced skill in Italian manufacturing; to increase Italian companies access to national and international funds; to support entrepreneurship and company growth through the involvement of private investors. +Clust-ERs are recognized Associations, formed in accordance with articles 14-42 of the Italian Civil Code. Clust-ER Associations are communities of public and private bodies (research centers, businesses, training bodies) that share ideas, skills, tools, and resources to support the competitiveness of the most important production systems in Emilia-Romagna. Thanks to Clust-ERs, research laboratories and centers for innovation belonging to the High Technology Network team up with the business system and the higher education system to make up the inter-disciplinary critical mass necessary to multiply opportunities and develop strategic projects with a high regional impact. Main objectives of Clust-ERs are: to maximize the opportunities for participating in European programs and international research and innovation networks, to forge synergies and set up coordinated and stable networks and connections with other public/private agglomerations operating in the same sectors at national and European level, to encourage and support the development and creation of initiatives in higher education and the development of human resources and to support the development of new research infrastructures. + +INFN is part of the National Cluster ``Intelligent Factories'' \cite{cfi}: an association that includes large and medium-small companies, universities and research centers, company associations and other stakeholders active in the advanced manufacturing sector. 
The association is recognized by MIUR as a driver of sustainable economic growth in all the regions of the national economic system, as it fosters the innovation and specialization of Italian manufacturing systems. The mission of CFI is to increase the competitiveness of the Italian manufacturing industry through the design and implementation of a number of research projects for the development of new enabling technologies; to maintain and develop advanced skills in Italian manufacturing; to increase Italian companies' access to national and international funds; to support entrepreneurship and company growth through the involvement of private investors. The INFN participates in this National Cluster through INFN personnel. -TTLab particapates to the project Opus Facere (Lab for Employability) \cite{opusfacere} with a course about Cosmic Rays Data Analysis, on the EEE \cite{eee} experiment data, where students of the fifth year of secondary school can understand and practice the job of data scientist and physicist. +TTLab participates in the Opus Facere project (Lab for Employability) \cite{opusfacere} with a course about Cosmic Rays Data Analysis, on the EEE \cite{eee} experiment data, where students of the fifth year of secondary school can understand and practice the job of data scientist and physicist. For more details, see dedicated contribution in this report. + +In 2018 TTLab started the activities funded by the Regional Plan for High Competencies for Research, Technology Transfer and Entrepreneurship \cite{altecompetenze} (5 research grants) for industrial research activities in different areas: models and algorithms for genome sequencing analysis, geospatial data access and processing services, big data analysis of physical, astrophysical and aerospatial data, big data analysis for smart cities.
In particular, the research grants activated are: -In 2018 TTLab started the activities founded by the Regional Plan for High Competencies for Research, Technology Transfer and Entrepreneurship (5 research grants) for industrial research activities in different areas: models and algorithms for genome sequencing analysis, geospatial data access and processing services, big data analisys of physical, atrophysical and aerospatial data, big data analysis for smart cities. In particular, the research grants activated are: \begin{itemize} \item[--]Big Data Analysis: algorithms and models for the analysis of nucleic acid sequencing data - partner CIG/Unibo -\item[--]Big Data management: services for accessing and processing geospatial data in the era of Big Data – Industrial partner MEEO s.r.l Meteorological Enviromental Earth Observation http://www.meeo.it/wp/ -\item[--]Big Data management: the analysis of data in the field of physics, astrophysics and space science industrial partner VEM Sistemi S.p.A https://vem.com/en/ -\item[--]Big Data management: Big Data in Smart Cities Industrial partner Filippetti S.p.A https://www.filippetti.it/en/ +\item[--]Big Data management: services for accessing and processing geospatial data in the era of Big Data – Industrial partner MEEO s.r.l \cite{meeo} Meteorological Environmental Earth Observation +\item[--]Big Data management: the analysis of data in the field of physics, astrophysics and space science industrial partner VEM Sistemi S.p.A \cite{vem} +\item[--]Big Data management: Big Data in Smart Cities Industrial partner Filippetti S.p.A \cite{filippetti} \end{itemize} -Finally, PETT coordinated the participation of INFN to the BI-REX (Big data Innovation and Research Excellence) Competence Center and actively contributed to the proposal. The BI-REX Competence Center is a project funded by the Ministry of Economic Development in the scope of Industry-4.0 plan.
It involves 61 public and private partner, among which all Universities and Research Agencies in the Emilia Romagna region and a number of enterprises of all sizes (multinationals, large and SME). +Three out of four proposals to Emilia Romagna POR-FESR 2018 call were funded: +\begin{itemize} +\item[--] FORTRESS: coordinated by INFN-TTLab, targets the innovative use of thin film transistors as direct radiation detectors integrated into large area flexible patches for two innovative applications. Two demos will be developed: SAFEINJECT and BEAMGUARD. +Two advanced material platforms: organic and perovskite thin films. They share the unique capability of realizing simple, thin and flexible transistors able to directly detect ionizing radiation and apt to be fabricated as thin, lightweight, low-power operated, large-area 2D pixelated matrices. + +\item[--] WE-LIGHT (WEarable LIGHTing for smart accessories and apparels): coordinated by UniMoRe with participation of INFN-TTLab, proposes the creation of prototypes of sportswear integrated with different technological systems of electronic, optical and sensorial type, able to connect whoever wear to the external environment. +\item[--] SmartChain: coordinated by UniMoRe with participation of INFN-TTLab, proposes the creation of a set of solutions based on +blockchain technologies to identify and implement innovative platforms useful to businesses of the Emilia Romagna territory. The project will analyze the current production, certification and tracking scenarios supply chains, proposing and implementing software systems that can improve the efficiency of production chains. +\end{itemize} + +Finally, PETT coordinated the participation of INFN to the BI-REX (Big data Innovation and Research Excellence) Competence Center and actively contributed to the proposal. The BI-REX Competence Center is a project funded by the Ministry of Economic Development in the scope of Industry-4.0 plan. 
It is composed of a pilot plant which aims to reconstruct an entire digital manufacturing process for mechanical components, which can demonstrate the potential of new technologies for production, while allowing the realization of finished products that can be used directly as demonstrators for different supply chains such as the automotive, mechatronics and biomedical industries. + +The pilot will be assisted by ICT systems for dynamic monitoring and reconfiguration of the various islands, +both separately and in integration, and elastic cloud / edge platforms for data collection and analytics from +sensors. +The Bi-Rex Competence Center involves 49 companies, 7 universities and 5 research institutions and it is co-funded by the Ministry of Economic Development and private partners. The total amount of the funding is about 20 MEuro. + +Bi-Rex main areas of research are: + +\begin{itemize} +\item[--] Additive Manufacturing +\item[--] ICT and automation in manufacturing industries +\item[--] Big Data and new digital business models +\item[--] Logistics +\item[--] Environmental and economic sustainability +\end{itemize} + +CNAF will contribute to the Competence Center with its expertise on Big Data management and on Cloud integration with HPC, IoT and Edge technologies. + +\section{SUPER} +INFN-TTLab worked on the proposal preparation and participates in the SUPER Supercomputing infrastructure and Big Data Analytics for advanced applications in the field of life sciences, advanced materials and innovative production systems. The partnership is composed of 12 subjects and includes the major regional players for the following areas: +\begin{itemize} +\item[--] Supercomputing and big data: CINECA, INFN, which have world-class infrastructures, CMCC, and +ENEA, which have national Tier 1 class systems; CNR, INAF and INGV that have departmental systems +and qualifying databases within their institutional contexts.
+\item[--] Genomics, regenerative medicine, and biobanks: University of Bologna, University of Modena and +Reggio Emilia, Rizzoli Orthopedic Institutes, University of Ferrara and University of Parma. +\item[--] Innovative industrial materials and systems: University of Bologna, University of Modena and Reggio +Emilia, CNR, University of Ferrara, University of Parma, ENEA. +\end{itemize} + + +\section{Outreach} +INFN CNAF Knowledge Transfer strategy roots in its connections with the INFN central structures related to this purpose (INFN External funds unit and INFN National Technology Transfer Committee) and takes advantage of its relationships with local economy and regional administration. The PETT Unit is coordinating the actions needed to translate this strategy into reality. CNAF and the PETT Unit are strongly committed to leverage the virtuous relationship between the datacenter personnel's big-data competencies and the R\&D activities, both at the forefront of technology, in order to bring back to society this innovation force. In fact, thanks to the experience gained running the LHC computing infrastructures, CNAF personnel is probably within the most skilled staffs in Italy in the field of big data management and HPC computing. Moreover, in the field of Cloud Computing CNAF has a primary role as R\&D actor and integrator of Cloud technologies with Internet of Things, Low Power computing and Edge computing systems. +All of them (big data, HPC and cloud computing) are some of the key technologies mentioned by the Italian Ministry of Economic Development National Plan Impresa 4.0 (formerly Industria 4.0) as a driver to improve competitiveness in the industrial sector, making CNAF one of the most promising actors in the technology transfer field. +INFN mission includes, in addition to research, the transfer to the society of the acquired knowledge. 
This definition means both the transfer of know-how in the form of training and technology transfer, and the dissemination of scientific culture. In order to make its intervention more effective, in 2016 the INFN was equipped with a Coordination Committee for the Third Mission (CC3M). The primary objective of this Committee is to coordinate local initiatives for the dissemination of scientific culture with national impact to strengthen its effectiveness. CNAF is linked to CC3M through a local representative (from the PETT Unit) who reports local activities to the Committee. + +Main outreach activities performed by CNAF personnel are: + +\begin{itemize} +\item[--] Training internships (summer students, curricular internships) \cite{summerstudents} +\item[--] Guided tours in the Tier 1 datacenter premises +\item[--] Coordination and holding of University and PhD courses on the topic of Infrastructure for Big +Data processing +\item[--] Outreach events like The European Researchers' Night 2017, an initiative promoted by the European Commission since 2005 (Marie Sklodowska-Curie actions) which involves thousands of researchers and research institutions in all European countries every year. It takes place every year throughout Europe on the last Friday of September. The goal is to create opportunities for researchers and citizens to meet to spread the scientific culture and knowledge of research professions in an informal and stimulating context. CNAF contributes to events such as live scientific experiments and demonstrations, exhibitions and guided tours, conferences and informative seminars, shows, concerts and artistic performances. +\item[--] School-work alternation within the OpusFacere Territorial Laboratory for Employability \cite{opusfacere}, an innovative educational project that comes from a network composed of educational institutes of the Metropolitan City of Bologna and public and private partners of the territory.
In this context, +CNAF designed and held a course named Cosmic Rays Data Analysis based on data collected by the Extreme Energy Events project and aimed at teaching high school students the job of physicist \cite{eee-opusfacere}. +\end{itemize} \section{Conclusions} In 2018 the External Projects and Technology Transfer group consolidated its collaboration with the research and innovation regional system participating and contributing to many initiatives aimed at creating a strong partnership between the research and industry sectors. -\bibliographystyle{iopart-num} -\section*{References} -\bibliography{bibliopett} - -\end{document} \ No newline at end of file +\section*{References} +\begin{thebibliography}{9} +\bibitem{altecompetenze} \url{https://formazionelavoro.regione.emilia-romagna.it/alta-formazione-ricerca/approfondimenti/piano-alte-competenze}, site visited on June 2019. +\bibitem{harmony} \url{https://www.harmony-alliance.eu/}, site visited on June 2019. +\bibitem{ttlab} \url{https://ttlab.infn.it/}, site visited on June 2019. +\bibitem{tropic} \url{https://agenda.infn.it/event/15101/contributions/28472/attachments/20303/23011/TROPIC_20190213_ebagli.pdf}, site visited on June 2019. +\bibitem{htn} \url{https://www.retealtatecnologia.it/en}, site visited on June 2019. +\bibitem{clusters} \url{https://www.retealtatecnologia.it/en/clust-er}, site visited on June 2019. +\bibitem{cfi} \url{https://www.fabbricaintelligente.it/}, site visited on June 2019. +\bibitem{meeo} \url{http://www.meeo.it/wp/}, site visited on June 2019. +\bibitem{vem} \url{https://vem.com/en/}, site visited on June 2019. +\bibitem{filippetti} \url{https://www.filippetti.it/en/}, site visited on June 2019. +\bibitem{opusfacere} \url{http://www.opusfacere.it/}, site visited on June 2019. +\bibitem{eee} \url{https://eee.centrofermi.it/}, site visited on June 2019.
+\bibitem{eee-opusfacere} Martelli B, Noferini F, Pellegrino C, Ronchieri E, Vistoli C, Seminar \emph{Cosmic Rays Data Analysis: insegnando Python con un Jupyter Notebook} \url{https://agenda.infn.it/event/19607/}, site visited on June 2019. +\bibitem{summerstudents} \url{https://agenda.infn.it/event/17430/}, site visited on June 2019. +\end{thebibliography} +\end{document} diff --git a/contributions/sc18/SC18.tex b/contributions/sc18/SC18.tex index 5f89285ba86079494ac25bf8a8108fb1ac39ccea..fed559d166f13aaf6ffc044cad5fed7b0ab22961 100644 --- a/contributions/sc18/SC18.tex +++ b/contributions/sc18/SC18.tex @@ -4,13 +4,13 @@ \title{ The annual international conference of high performance computing: SC18 from INFN point of view} %\address{Production Editor, \jpcs, \iopp, Dirac House, Temple Back, Bristol BS1~6BE, UK} -\author{A. Costantini$^1$, D. Salomoni$^1$, S. Zani$^1$, S. Longo$^1$, L.Chiarelli$^2$ and G. Grandi$^3$ +\author{A. Costantini$^1$, D. Salomoni$^1$, S. Zani$^1$, S. Longo$^1$, L.Chiarelli$^2$, G. Grandi$^3$ % etc. } -\address{$^1$ INFN-CNAF, Bologna, Italy} -\address{$^2$ GARR, Bologna, Italy} -\address{$^3$ INFN-Bologna, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} +\address{$^2$ GARR, Bologna, IT} +\address{$^3$ INFN Sezione di Bologna, Bologna, IT} \ead{alessandro.costantini@cnaf.infn.it} @@ -28,7 +28,7 @@ SC18 \cite{sc18} marks the 30th anniversary of the annual international conferen INFN \cite{infn} is participating to this international conference since 2012 with the support and the collaboration of \begin{itemize} \item CINECA \cite{cineca}, a not-for-profit Consortium, made up of 67 Italian universities, 9 Italian Research Institutions, 1 Polyclinic and the Italian Ministry of Education. - Today it is the largest Italian computing centre, one of the most important worldwide. + Today it is the largest Italian computing center, one of the most important worldwide. 
\item INAF \cite{inaf}, is the most important Italian institution conducting scientific research in astronomy and astrophysics. Research ranges from the study of the planets and minor bodies of the Solar system up to the large-scale structure of the Universe and groups and clusters of galaxies on cosmological scales. \item GARR\cite{garr}, the ultra-broadband network dedicated to the Italian research and education community. Its main objective is to provide high-performance @@ -102,14 +102,14 @@ In particular, the following list of posters, and related contributors, have bee \item COmputing on SoC Architecture: the INFN COSA Project \item INAF - HPC New challenges and perspectives CINECA and EU-H2020 Programs \item CRESCO6: tech specs \& benchmarks (ENEA) -\item ENEA partnership in the European Energy oriented Centre of Excellence (ENEA) +\item ENEA partnership in the European Energy oriented Center of Excellence (ENEA) \item INFN Collaboration and Opportunities \item DataCloud HYBRID SERVICES From Integrated Solutions to Exascale and beyond \item INFN Service Portfolio \item The INDIGO Identity and Access Management Service \item HTC, HPC, CLOUD, no matter what your flavors are. New users are welcome at ReCaS-Bari DataCenter \item HPC for Theoretical Physics at INFN -\item INFN CNAF “The Italian LHC TIER-1" +\item INFN CNAF “The Italian LHC TIER 1" \end{itemize} diff --git a/contributions/sd_iam/biblio.bib b/contributions/sd_iam/biblio.bib new file mode 100644 index 0000000000000000000000000000000000000000..b5a6474a687e2f287207c88316b7113d96dd0b22 --- /dev/null +++ b/contributions/sd_iam/biblio.bib @@ -0,0 +1,428 @@ +@techreport{jwt, + author = {Michael B. 
Jones and John Bradley and Nat Sakimura}, + title = {{The JSON Web Token RFC}}, + type = {RFC}, + number = 7519, + year = {2015}, + month = {May}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc7519.txt} +} + +@techreport{oauth, + author = {Dick Hardt}, + title = {{The OAuth 2.0 Authorization Framework}}, + type = {RFC}, + number = 6749, + year = {2012}, + month = {October}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc6749.txt} +} + +@techreport{oauth-token-exchange, + author = {Michael B. Jones and Anthony Nadalin and Brian Campbell + and John Bradley and Chuck Mortimore}, + title = {{OAuth 2.0 Token Exchange}}, + type = {Internet-Draft}, + number = "draft-ietf-oauth-token-exchange-16.txt", + year = {2019}, + month = {April}, + day = {22}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/id/draft-ietf-oauth-token-exchange-16.txt} +} + +@techreport{oauth-metadata, + author = {Michael B. Jones and Nat Sakimura and John Bradley}, + title = {{OAuth 2.0 Authorization Server Metadata}}, + type = {RFC}, + number = 8414, + year = {2018}, + month = {June}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc8414.txt} +} + +@online{oidc, + author = {{OpenID Foundation}}, + title = {{The OpenID Connect identity layer}}, + year = 2018, + url = {https://openid.net/connect/}, + urldate = {2018-12-03} +} + +@online{oidc-discovery, + author = {{Nat Sakimura and John Bradley and Michael B. 
Jones and Edmund Jay}}, + title = {{The OpenID Connect discovery specification}}, + year = 2014, + url = {https://openid.net/specs/openid-connect-discovery-1_0.html}, + urldate = {2018-12-03} +} + +@online{igtf, + title = {{The Interoperable Global Trust Federation}}, + url = {https://www.igtf.net/}, + urldate = {2018-12-03} +} + +@online{x509, + title = {{X.509}}, + url = {https://en.wikipedia.org/wiki/X.509}, + urldate = {2018-12-03} +} + +@article{GSI, + author = {Von Welch and + Frank Siebenlist and + Ian T. Foster and + John Bresnahan and + Karl Czajkowski and + Jarek Gawor and + Carl Kesselman and + Sam Meder and + Laura Pearlman and + Steven Tuecke}, + title = {Security for Grid Services}, + journal = {CoRR}, + volume = {cs.CR/0306129}, + year = {2003}, + url = {http://arxiv.org/abs/cs.CR/0306129}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/cs-CR-0306129}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@software{VOMS, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The Virtual Organisation Membership Service}}, + doi = {10.5281/zenodo.1875371}, + url = {https://doi.org/10.5281/zenodo.1875371} +} + +@online{edugain, + title = {{eduGAIN interfederation website}}, + url = {http://www.geant.org/Services/Trust_identity_and_security/eduGAIN}, + urldate = {2018-12-03} +} + +@online{google, + title = {{The Google Identity Platform}}, + url = {https://developers.google.com/identity/}, + urldate = {2018-12-03} +} + +@online{scim, + title = {{The System for Cross Domain Identity Management website}}, + url = {http://www.simplecloud.info/}, + urldate = {2018-12-03} +} + +@article{indigo-aai-chep2016, + author={Andrea Ceccanti and Marcus Hardt and Bas Wegh and A. 
Paul Millar + and Marco Caberletti and Enrico Vianello and Slavek Licehammer}, + title={{The INDIGO-Datacloud Authentication and Authorization Infrastructure}}, + journal={Journal of Physics: Conference Series}, + volume={898}, + number={10}, + pages={102016}, + url={http://iopscience.iop.org/article/10.1088/1742-6596/898/10/102016}, + year={2017} +} + + +@software{iam, + author = {Andrea Ceccanti and Enrico Vianello and Marco Caberletti}, + title = {{INDIGO Identity and Access Management (IAM)}}, + doi = {10.5281/zenodo.1874790}, + url = {https://doi.org/10.5281/zenodo.1874790} +} + + +@software{voms-admin, + author = {Andrea Ceccanti}, + title = {{The VOMS administration service}}, + doi = {10.5281/zenodo.1875616}, + url = {https://doi.org/10.5281/zenodo.1875616} +} + +@misc{cwp, +Author = {{HEP Software Foundation} and Johannes Albrecht and Antonio + Augusto {Alves} Jr and Guilherme Amadio and Giuseppe Andronico and Nguyen + Anh-Ky and Laurent Aphecetche and John Apostolakis and Makoto Asai and Luca + Atzori and Marian Babik and Giuseppe Bagliesi and Marilena Bandieramonte + and Sunanda Banerjee and Martin Barisits and Lothar A. T. 
Bauerdick and + Stefano Belforte and Douglas Benjamin and Catrin Bernius and Wahid Bhimji + and Riccardo Maria Bianchi and Ian Bird and Catherine Biscarat and Jakob + Blomer and Kenneth Bloom and Tommaso Boccali and Brian Bockelman and Tomasz + Bold and Daniele Bonacorsi and Antonio Boveia and Concezio Bozzi and Marko + Bracko and David Britton and Andy Buckley and Predrag Buncic and Paolo + Calafiura and Simone Campana and Philippe Canal and Luca Canali and + Gianpaolo Carlino and Nuno Castro and Marco Cattaneo and Gianluca Cerminara + and Javier Cervantes Villanueva and Philip Chang and John Chapman and Gang + Chen and Taylor Childers and Peter Clarke and Marco Clemencic and Eric + Cogneras and Jeremy Coles and Ian Collier and David Colling and Gloria + Corti and Gabriele Cosmo and Davide Costanzo and Ben Couturier and Kyle + Cranmer and Jack Cranshaw and Leonardo Cristella and David Crooks and + Sabine Crépé-Renaudin and Robert Currie and Sünje Dallmeier-Tiessen and + Kaushik De and Michel De Cian and Albert De Roeck and Antonio Delgado Peris + and Frédéric Derue and Alessandro Di Girolamo and Salvatore Di Guida and + Gancho Dimitrov and Caterina Doglioni and Andrea Dotti and Dirk Duellmann + and Laurent Duflot and Dave Dykstra and Katarzyna Dziedziniewicz-Wojcik and + Agnieszka Dziurda and Ulrik Egede and Peter Elmer and Johannes Elmsheuser + and V. Daniel Elvira and Giulio Eulisse and Steven Farrell and Torben + Ferber and Andrej Filipcic and Ian Fisk and Conor Fitzpatrick and José Flix + and Andrea Formica and Alessandra Forti and Giovanni Franzoni and James + Frost and Stu Fuess and Frank Gaede and Gerardo Ganis and Robert Gardner + and Vincent Garonne and Andreas Gellrich and Krzysztof Genser and Simon + George and Frank Geurts and Andrei Gheata and Mihaela Gheata and Francesco + Giacomini and Stefano Giagu and Manuel Giffels and Douglas Gingrich and + Maria Girone and Vladimir V. 
Gligorov and Ivan Glushkov and Wesley Gohn and + Jose Benito Gonzalez Lopez and Isidro González Caballero and Juan R. + González Fernández and Giacomo Govi and Claudio Grandi and Hadrien Grasland + and Heather Gray and Lucia Grillo and Wen Guan and Oliver Gutsche and + Vardan Gyurjyan and Andrew Hanushevsky and Farah Hariri and Thomas Hartmann + and John Harvey and Thomas Hauth and Benedikt Hegner and Beate Heinemann + and Lukas Heinrich and Andreas Heiss and José M. Hernández and Michael + Hildreth and Mark Hodgkinson and Stefan Hoeche and Burt Holzman and Peter + Hristov and Xingtao Huang and Vladimir N. Ivanchenko and Todor Ivanov and + Jan Iven and Brij Jashal and Bodhitha Jayatilaka and Roger Jones and Michel + Jouvin and Soon Yung Jun and Michael Kagan and Charles William Kalderon and + Meghan Kane and Edward Karavakis and Daniel S. Katz and Dorian Kcira and + Oliver Keeble and Borut Paul Kersevan and Michael Kirby and Alexei + Klimentov and Markus Klute and Ilya Komarov and Dmitri Konstantinov and + Patrick Koppenburg and Jim Kowalkowski and Luke Kreczko and Thomas Kuhr and + Robert Kutschke and Valentin Kuznetsov and Walter Lampl and Eric Lancon and + David Lange and Mario Lassnig and Paul Laycock and Charles Leggett and + James Letts and Birgit Lewendel and Teng Li and Guilherme Lima and Jacob + Linacre and Tomas Linden and Miron Livny and Giuseppe Lo Presti and + Sebastian Lopienski and Peter Love and Adam Lyon and Nicolò Magini and + Zachary L. Marshall and Edoardo Martelli and Stewart Martin-Haugh and Pere + Mato and Kajari Mazumdar and Thomas McCauley and Josh McFayden and Shawn + McKee and Andrew McNab and Rashid Mehdiyev and Helge Meinhard and Dario + Menasce and Patricia Mendez Lorenzo and Alaettin Serhan Mete and Michele + Michelotto and Jovan Mitrevski and Lorenzo Moneta and Ben Morgan and + Richard Mount and Edward Moyse and Sean Murray and Armin Nairz and Mark S. 
+ Neubauer and Andrew Norman and Sérgio Novaes and Mihaly Novak and Arantza + Oyanguren and Nurcan Ozturk and Andres Pacheco Pages and Michela Paganini + and Jerome Pansanel and Vincent R. Pascuzzi and Glenn Patrick and Alex + Pearce and Ben Pearson and Kevin Pedro and Gabriel Perdue and Antonio + Perez-Calero Yzquierdo and Luca Perrozzi and Troels Petersen and Marko + Petric and Andreas Petzold and Jónatan Piedra and Leo Piilonen and Danilo + Piparo and Jim Pivarski and Witold Pokorski and Francesco Polci and Karolos + Potamianos and Fernanda Psihas and Albert Puig Navarro and Günter Quast and + Gerhard Raven and Jürgen Reuter and Alberto Ribon and Lorenzo Rinaldi and + Martin Ritter and James Robinson and Eduardo Rodrigues and Stefan Roiser + and David Rousseau and Gareth Roy and Grigori Rybkine and Andre Sailer and + Tai Sakuma and Renato Santana and Andrea Sartirana and Heidi Schellman and + Jaroslava Schovancová and Steven Schramm and Markus Schulz and Andrea + Sciabà and Sally Seidel and Sezen Sekmen and Cedric Serfon and Horst + Severini and Elizabeth Sexton-Kennedy and Michael Seymour and Davide + Sgalaberna and Illya Shapoval and Jamie Shiers and Jing-Ge Shiu and Hannah + Short and Gian Piero Siroli and Sam Skipsey and Tim Smith and Scott Snyder + and Michael D. Sokoloff and Panagiotis Spentzouris and Hartmut Stadie and + Giordon Stark and Gordon Stewart and Graeme A. Stewart and Arturo Sánchez + and Alberto Sánchez-Hernández and Anyes Taffard and Umberto Tamponi and + Jeff Templon and Giacomo Tenaglia and Vakhtang Tsulaia and Christopher + Tunnell and Eric Vaandering and Andrea Valassi and Sofia Vallecorsa and + Liviu Valsan and Peter Van Gemmeren and Renaud Vernet and Brett Viren and + Jean-Roch Vlimant and Christian Voss and Margaret Votava and Carl Vuosalo + and Carlos Vázquez Sierra and Romain Wartel and Gordon T. 
Watts and Torre + Wenaus and Sandro Wenzel and Mike Williams and Frank Winklmeier and + Christoph Wissing and Frank Wuerthwein and Benjamin Wynne and Zhang Xiaomei + and Wei Yang and Efe Yazgan}, Title = {{A Roadmap for HEP Software and + Computing R\&D for the 2020s}}, Year = {2017}, Eprint = {arXiv:1712.06982}, +} + +@online{scitokens, + title = {{The SciTokens project}}, + url = {https://scitokens.org}, + urldate = {2018-12-03} +} + +@online{kubernetes, + title = {{The Kubernetes container orchestrator}}, + url = {https://kubernetes.io}, + urldate = {2018-12-03} +} + +@online{openstack, + title = {{The Openstack IAAS framework}}, + url = {https://www.openstack.org}, + urldate = {2018-12-03} +} + +@online{fts, + title = {{The CERN File Transfer Service}}, + url = {https://fts.web.cern.ch}, + urldate = {2018-12-03} +} + +@online{storm, + title = {{The StoRM storage element}}, + url = {https://italiangrid.github.io/storm}, + urldate = {2018-12-03} +} + +@online{dcache, + title = {{The dCache storage solution}}, + url = {https://dcache.org}, + urldate = {2018-12-03} +} + +@online{oidc-rande, + title = {{The OpenID Research \& Education working group}}, + url = {https://openid.net/wg/rande}, + urldate = {2018-12-03} +} + +@techreport{voms-ac-format, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The VOMS Attribute Certificate format }}, + year = {2011}, + month = {August}, + publisher = {Open Grid Forum}, + institution = {Open Grid Forum}, + url = {https://www.ogf.org/documents/GFD.182.pdf} +} + +@online{aarc-blueprint, + title = {{The AARC Blueprint Architecture}}, + url = {https://aarc-project.eu/architecture}, + urldate = {2018-12-03} +} + +@online{rcauth-ssh, + title = {{RCAuth.eu: getting proxies using SSH key AuthN}}, + author = {Mischa Sall\'e}, + url = {https://indico.cern.ch/event/669715/contributions/2739035/attachments/1532101/2398499/RCauth_SSH_wlcg_authz_wg.pdf}, + urldate = {2018-12-03} +} + +@online{oauth4myproxy, 
+ title = {{OAuth for MyProxy}}, + url = {http://grid.ncsa.illinois.edu/myproxy/oauth/}, + urldate = {2019-03-18} +} + +@online{rcauth, + title = {{The RCAuth online CA}}, + url = {https://rcauth.eu}, + urldate = {2018-12-03} +} + +@online{dodas, + title = {{Dynamic On Demand Analysis Service: DODAS}}, + url = {https://dodas-ts.github.io/dodas-doc}, + urldate = {2018-12-03} +} + +@online{eosc-hub, + title = {{The EOSC-Hub project}}, + url = {https://www.eosc-hub.eu}, + urldate = {2018-12-03} +} + +@online{aarc, + title = {{The AARC project}}, + url = {https://aarc-project.eu}, + urldate = {2018-12-03} +} + +@online{fim4r, + title = {{Federated Identity Management for Research}}, + url = {https://fim4r.org}, + urldate = {2018-12-03} +} + +@online{wlcg-authz-wg, + title = {{The WLCG Authorization Working Group}}, + url = {https://twiki.cern.ch/twiki/bin/view/LCG/WLCGAuthorizationWG}, + urldate = {2018-12-03} +} + +@online{nikhef, + title = {{The Dutch National Institute for Sub-atomic Physics}}, + url = {https://www.nikhef.nl}, + urldate = {2019-05-10} +} + +@misc{indigo-datacloud, + Author = {INDIGO-DataCloud Collaboration and : and Davide Salomoni and Isabel + Campos and Luciano Gaido and Jesus Marco de Lucas and Peter Solagna and Jorge + Gomes and Ludek Matyska and Patrick Fuhrman and Marcus Hardt and Giacinto + Donvito and Lukasz Dutka and Marcin Plociennik and Roberto Barbera and + Ignacio Blanquer and Andrea Ceccanti and Mario David and Cristina Duma and + Alvaro López-García and Germán Moltó and Pablo Orviz and Zdenek Sustr and + Matthew Viljoen and Fernando Aguilar and Luis Alves and Marica Antonacci + and Lucio Angelo Antonelli and Stefano Bagnasco and Alexandre M. J. J. 
+ Bonvin and Riccardo Bruno and Eva Cetinic and Yin Chen and Alessandro Costa + and Davor Davidovic and Benjamin Ertl and Marco Fargetta and Sandro Fiore + and Stefano Gallozzi and Zeynep Kurkcuoglu and Lara Lloret and Joao Martins + and Alessandra Nuzzo and Paola Nassisi and Cosimo Palazzo and Joao Pina and + Eva Sciacca and Daniele Spiga and Marco Antonio Tangaro and Michal Urbaniak + and Sara Vallero and Bas Wegh and Valentina Zaccolo and Federico Zambelli + and Tomasz Zok}, + Title = {{INDIGO-DataCloud: A data and computing platform to facilitate seamless + access to e-infrastructures}}, + Year = {2017}, + Eprint = {arXiv:1711.01981}, +} + +@online{kubernetes-labels, + title = {{Kubernetes labels and selectors}}, + url = {https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/}, + urldate = {2018-12-03} +} + +@online{spid, + title = {{Sistema Pubblico di Identità Digitale}}, + url = {https://www.spid.gov.it/}, + urldate = {2018-12-03} +} + +@online{hr-db-api-service, + title = {{CERN HR DB API service}}, + url = {https://baltig.infn.it/aceccant/cern-hr-db-service}, + urldate = {2018-12-03} +} + +@online{cern-openshift, + title = {{CERN Openshift PAAS infrastructure}}, + url = {http://information-technology.web.cern.ch/services/PaaS-Web-App}, + urldate = {2018-12-03} +} +@online{keycloak, + title = {{The Keycloak Identity and Access Management system}}, + url = {https://www.keycloak.org/}, + urldate = {2018-12-03} +} + +@inproceedings{cern-sso, + doi = {10.1088/1742-6596/119/8/082008}, + url = {https://doi.org/10.1088%2F1742-6596%2F119%2F8%2F082008}, + year = 2008, + volume = {119}, + number = {8}, + pages = {082008}, + author = {E Ormancey}, + title = {{CERN} single sign on solution}, + booktitle = {Journal of Physics: Conference Series} +} + +@inproceedings{voms-convergence, + author={Andrea Ceccanti and Vincenzo Ciaschini and Maria Dimou and Gabriele Garzoglio and Tanya Levshina and Steve 
Traylen and Valerio Venturi}, + title={{VOMS/VOMRS utilization patterns and convergence plan}}, + booktitle={Journal of Physics: Conference Series}, + volume={219}, + number={6}, + pages={062006}, + url={http://stacks.iop.org/1742-6596/219/i=6/a=062006}, + year={2010} +} diff --git a/contributions/sd_iam/iopams.sty b/contributions/sd_iam/iopams.sty new file mode 100644 index 0000000000000000000000000000000000000000..044dde929745d48d13601b572a0f586728ebf0a4 --- /dev/null +++ b/contributions/sd_iam/iopams.sty @@ -0,0 +1,87 @@ +%% +%% This is file `iopams.sty' +%% File to include AMS fonts and extra definitions for bold greek +%% characters for use with iopart.cls +%% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{iopams}[1997/02/13 v1.0] +\RequirePackage{amsgen}[1995/01/01] +\RequirePackage{amsfonts}[1995/01/01] +\RequirePackage{amssymb}[1995/01/01] +\RequirePackage{amsbsy}[1995/01/01] +% +\iopamstrue % \newif\ifiopams in iopart.cls & iopbk2e.cls +% % allows optional text to be in author guidelines +% +% Bold lower case Greek letters +% +\newcommand{\balpha}{\boldsymbol{\alpha}} +\newcommand{\bbeta}{\boldsymbol{\beta}} +\newcommand{\bgamma}{\boldsymbol{\gamma}} +\newcommand{\bdelta}{\boldsymbol{\delta}} +\newcommand{\bepsilon}{\boldsymbol{\epsilon}} +\newcommand{\bzeta}{\boldsymbol{\zeta}} +\newcommand{\bfeta}{\boldsymbol{\eta}} +\newcommand{\btheta}{\boldsymbol{\theta}} +\newcommand{\biota}{\boldsymbol{\iota}} +\newcommand{\bkappa}{\boldsymbol{\kappa}} +\newcommand{\blambda}{\boldsymbol{\lambda}} +\newcommand{\bmu}{\boldsymbol{\mu}} +\newcommand{\bnu}{\boldsymbol{\nu}} +\newcommand{\bxi}{\boldsymbol{\xi}} +\newcommand{\bpi}{\boldsymbol{\pi}} +\newcommand{\brho}{\boldsymbol{\rho}} +\newcommand{\bsigma}{\boldsymbol{\sigma}} +\newcommand{\btau}{\boldsymbol{\tau}} +\newcommand{\bupsilon}{\boldsymbol{\upsilon}} +\newcommand{\bphi}{\boldsymbol{\phi}} +\newcommand{\bchi}{\boldsymbol{\chi}} +\newcommand{\bpsi}{\boldsymbol{\psi}} +\newcommand{\bomega}{\boldsymbol{\omega}} 
+\newcommand{\bvarepsilon}{\boldsymbol{\varepsilon}} +\newcommand{\bvartheta}{\boldsymbol{\vartheta}} +\newcommand{\bvaromega}{\boldsymbol{\varomega}} +\newcommand{\bvarrho}{\boldsymbol{\varrho}} +\newcommand{\bvarzeta}{\boldsymbol{\varsigma}} %NB really sigma +\newcommand{\bvarsigma}{\boldsymbol{\varsigma}} +\newcommand{\bvarphi}{\boldsymbol{\varphi}} +% +% Bold upright capital Greek letters +% +\newcommand{\bGamma}{\boldsymbol{\Gamma}} +\newcommand{\bDelta}{\boldsymbol{\Delta}} +\newcommand{\bTheta}{\boldsymbol{\Theta}} +\newcommand{\bLambda}{\boldsymbol{\Lambda}} +\newcommand{\bXi}{\boldsymbol{\Xi}} +\newcommand{\bPi}{\boldsymbol{\Pi}} +\newcommand{\bSigma}{\boldsymbol{\Sigma}} +\newcommand{\bUpsilon}{\boldsymbol{\Upsilon}} +\newcommand{\bPhi}{\boldsymbol{\Phi}} +\newcommand{\bPsi}{\boldsymbol{\Psi}} +\newcommand{\bOmega}{\boldsymbol{\Omega}} +% +% Bold versions of miscellaneous symbols +% +\newcommand{\bpartial}{\boldsymbol{\partial}} +\newcommand{\bell}{\boldsymbol{\ell}} +\newcommand{\bimath}{\boldsymbol{\imath}} +\newcommand{\bjmath}{\boldsymbol{\jmath}} +\newcommand{\binfty}{\boldsymbol{\infty}} +\newcommand{\bnabla}{\boldsymbol{\nabla}} +\newcommand{\bdot}{\boldsymbol{\cdot}} +% +% Symbols for caption +% +\renewcommand{\opensquare}{\mbox{$\square$}} +\renewcommand{\opentriangle}{\mbox{$\vartriangle$}} +\renewcommand{\opentriangledown}{\mbox{$\triangledown$}} +\renewcommand{\opendiamond}{\mbox{$\lozenge$}} +\renewcommand{\fullsquare}{\mbox{$\blacksquare$}} +\newcommand{\fulldiamond}{\mbox{$\blacklozenge$}} +\newcommand{\fullstar}{\mbox{$\bigstar$}} +\newcommand{\fulltriangle}{\mbox{$\blacktriangle$}} +\newcommand{\fulltriangledown}{\mbox{$\blacktriangledown$}} + +\endinput +%% +%% End of file `iopams.sty'. 
diff --git a/contributions/sd_iam/jpconf.cls b/contributions/sd_iam/jpconf.cls new file mode 100644 index 0000000000000000000000000000000000000000..09f509fdcfde0543cfbc37e4f64c02e11d9b4972 --- /dev/null +++ b/contributions/sd_iam/jpconf.cls @@ -0,0 +1,957 @@ +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{jpconf} + [2007/03/07 v1.1 + LaTeX class for Journal of Physics: Conference Series] +%\RequirePackage{graphicx} +\newcommand\@ptsize{1} +\newif\if@restonecol +\newif\if@letterpaper +\newif\if@titlepage +\newif\ifiopams +\@titlepagefalse +\@letterpaperfalse +\DeclareOption{a4paper} + {\setlength\paperheight {297mm}% + \setlength\paperwidth {210mm}% +\@letterpaperfalse} +\DeclareOption{letterpaper} + {\setlength\paperheight {279.4mm}% + \setlength\paperwidth {215.9mm}% +\@letterpapertrue} +\DeclareOption{landscape} + {\setlength\@tempdima {\paperheight}% + \setlength\paperheight {\paperwidth}% + \setlength\paperwidth {\@tempdima}} +\DeclareOption{twoside}{\@twosidetrue \@mparswitchtrue} +\renewcommand\@ptsize{1} +%\ExecuteOptions{A4paper, twoside} +\ExecuteOptions{A4paper} +\ProcessOptions +\DeclareMathAlphabet{\bi}{OML}{cmm}{b}{it} +\DeclareMathAlphabet{\bcal}{OMS}{cmsy}{b}{n} +\input{jpconf1\@ptsize.clo} +\setlength\lineskip{1\p@} +\setlength\normallineskip{1\p@} +\renewcommand\baselinestretch{} +\setlength\parskip{0\p@ \@plus \p@} +\@lowpenalty 51 +\@medpenalty 151 +\@highpenalty 301 +\setlength\parindent{5mm} +\setcounter{topnumber}{8} +\renewcommand\topfraction{1} +\setcounter{bottomnumber}{3} +\renewcommand\bottomfraction{.99} +\setcounter{totalnumber}{8} +\renewcommand\textfraction{0.01} +\renewcommand\floatpagefraction{.8} +\setcounter{dbltopnumber}{6} +\renewcommand\dbltopfraction{1} +\renewcommand\dblfloatpagefraction{.8} +\renewcommand{\title}{\@ifnextchar[{\@stitle}{\@ftitle}} +\pretolerance=5000 +\tolerance=8000 +% Headings for all pages apart from first +% +\def\ps@headings{% + \let\@oddfoot\@empty + \let\@evenfoot\@empty + \let\@oddhead\@empty + 
\let\@evenhead\@empty + %\def\@evenhead{\thepage\hfil\itshape\rightmark}% + %\def\@oddhead{{\itshape\leftmark}\hfil\thepage}% + %\def\@evenhead{{\itshape Journal of Physics: Conference Series}\hfill}% + %\def\@oddhead{\hfill {\itshape Journal of Physics: Conference Series}}%% + \let\@mkboth\markboth + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +% Headings for first page +% +\def\ps@myheadings{\let\@oddfoot\@empty\let\@evenfoot\@empty + \let\@oddhead\@empty\let\@evenhead\@empty + \let\@mkboth\@gobbletwo + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +\def\@stitle[#1]#2{\markboth{#1}{#1}% + %\pagestyle{empty}% + \thispagestyle{myheadings} + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large +\fontsize{18bp}{24bp}\selectfont\bf\raggedright\noindent#2\par}} +\def\@ftitle#1{\markboth{#1}{#1}% + \thispagestyle{myheadings} +%\pagestyle{empty}% + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large\raggedright\noindent\bf#1\par} +\fontsize{18bp}{24bp}\selectfont\bf\noindent\raggedright#1\par}} +%AUTHOR +\renewcommand{\author}{\@ifnextchar[{\@sauthor}{\@fauthor}} +\def\@sauthor[#1]#2{\markright{#1} % for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#2 + \end{indented}% + \smallskip} +\def\@fauthor#1{%\markright{#1} for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#1 + \end{indented}% + \smallskip} +%E-MAIL +\def\eads#1{\vspace*{5pt}\address{E-mail: #1}} +\def\ead#1{\vspace*{5pt}\address{E-mail: \mailto{#1}}} +\def\mailto#1{{\tt #1}} +%ADDRESS +\newcommand{\address}[1]{\begin{indented} + \item[]\rm\raggedright #1 + \end{indented}} +\newlength{\indentedwidth} +\newdimen\mathindent +\mathindent = 6pc +\indentedwidth=\mathindent +% FOOTNOTES +%\renewcommand\footnoterule{% +% \kern-3\p@ +% \hrule\@width.4\columnwidth +% \kern2.6\p@} +%\newcommand\@makefntext[1]{% +% \parindent 1em% +% \noindent +% 
\hb@xt@1.8em{\hss\@makefnmark}#1} +% Footnotes: symbols selected in same order as address indicators +% unless optional argument of [<num>] use to specify required symbol, +% 1=\dag, 2=\ddag, etc +% Usage: \footnote{Text of footnote} +% \footnote[3]{Text of footnote} +% +\def\footnoterule{}% +\setcounter{footnote}{0} +\long\def\@makefntext#1{\parindent 1em\noindent + \makebox[1em][l]{\footnotesize\rm$\m@th{\fnsymbol{footnote}}$}% + \footnotesize\rm #1} +\def\@makefnmark{\normalfnmark} +\def\normalfnmark{\hbox{${\fnsymbol{footnote}}\m@th$}} +\def\altfnmark{\hbox{$^{\rm Note}\ {\fnsymbol{footnote}}\m@th$}} +\def\footNote#1{\let\@makefnmark\altfnmark\footnote{#1}\let\@makefnmark\normalfnmark} +\def\@thefnmark{\fnsymbol{footnote}} +\def\footnote{\protect\pfootnote} +\def\pfootnote{\@ifnextchar[{\@xfootnote}{\stepcounter{\@mpfn}% + \begingroup\let\protect\noexpand + \xdef\@thefnmark{\thempfn}\endgroup + \@footnotemark\@footnotetext}} +\def\@xfootnote[#1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} + +\newcommand\ftnote{\protect\pftnote} +\newcommand\pftnote[1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} +\newcommand{\fnm}[1]{\setcounter{footnote}{#1}\footnotetext} + +\def\@fnsymbol#1{\ifnum\thefootnote=99\hbox{*}\else^{\thefootnote}\fi\relax} +% +% Address marker +% +\newcommand{\ad}[1]{\noindent\hbox{$^{#1}$}\relax} +\newcommand{\adnote}[2]{\noindent\hbox{$^{#1,}$}\setcounter{footnote}{#2}% + \addtocounter{footnote}{-1}\footnote} +\def\@tnote{} +\newcounter{oldftnote} +\newcommand{\tnote}[1]{*\gdef\@tnote{% + \setcounter{oldftnote}{\c@footnote}% + \setcounter{footnote}{99}% + \footnotetext{#1}% + \setcounter{footnote}{\c@oldftnote}\addtocounter{footnote}{-1}}} +%================== +% Acknowledgments (no heading if letter) +% Usage \ack for Acknowledgments, \ackn for Acknowledgement +\def\ack{\section*{Acknowledgments}} +\def\ackn{\section*{Acknowledgment}} +%SECTION DEFINITIONS +\setcounter{secnumdepth}{3} +\newcounter 
{section} +\newcounter {subsection}[section] +\newcounter {subsubsection}[subsection] +\newcounter {paragraph}[subsubsection] +\newcounter {subparagraph}[paragraph] +\renewcommand \thesection {\arabic{section}} +\renewcommand\thesubsection {\thesection.\arabic{subsection}} +\renewcommand\thesubsubsection{\thesubsection .\arabic{subsubsection}} +\renewcommand\theparagraph {\thesubsubsection.\arabic{paragraph}} +\renewcommand\thesubparagraph {\theparagraph.\arabic{subparagraph}} +%\nosections +\def\nosections{\vspace{30\p@ plus12\p@ minus12\p@} + \noindent\ignorespaces} + +%\renewcommand{\@startsection}[6] +%{% +%\if@noskipsec \leavevmode \fi +%\par +% \@tempskipa #4\relax +%%\@tempskipa 0pt\relax +% \@afterindenttrue +% \ifdim \@tempskipa <\z@ +% \@tempskipa -\@tempskipa \@afterindentfalse +% \fi +% \if@nobreak +% \everypar{}% +% \else +% \addpenalty\@secpenalty\addvspace\@tempskipa +% \fi +% \@ifstar +% {\@ssect{#3}{#4}{#5}{#6}}% +% {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} +%\renewcommand{\@sect}[8]{% +% \ifnum #2>\c@secnumdepth +% \let\@svsec\@empty +% \else +% \refstepcounter{#1}% +% \protected@edef\@svsec{\@seccntformat{#1}\relax}% +% \fi +% \@tempskipa #5\relax +% \ifdim \@tempskipa>\z@ +% \begingroup +% #6{% +% \@hangfrom{\hskip #3\relax\@svsec}% +% \interlinepenalty \@M #8\@@par}% +% \endgroup +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}% +% \else +% \def\@svsechd{% +% #6{\hskip #3\relax +% \@svsec #8}% +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}}% +% \fi +% \@xsect{#5}} +%\renewcommand{\@xsect}[1]{% +% \@tempskipa #1\relax +% \ifdim \@tempskipa>\z@ +% \par \nobreak +% \vskip \@tempskipa +% \@afterheading +% \else +% \@nobreakfalse +% \global\@noskipsectrue +% \everypar{% +% \if@noskipsec +% \global\@noskipsecfalse +% 
{\setbox\z@\lastbox}% +% \clubpenalty\@M +% \begingroup \@svsechd \endgroup +% \unskip +% \@tempskipa #1\relax +% \hskip -\@tempskipa +% \else +% \clubpenalty \@clubpenalty +% \everypar{}% +% \fi}% +% \fi +% \ignorespaces} +%======================================================================== +\newcommand\section{\@startsection {section}{1}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\bfseries\raggedright}} +\newcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\itshape\raggedright}} +\newcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {-1em \@plus .2em}% + {\reset@font\normalsize\itshape}} +\newcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {3.25ex \@plus1ex \@minus.2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\newcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}% + {3.25ex \@plus1ex \@minus .2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\def\@sect#1#2#3#4#5#6[#7]#8{\ifnum #2>\c@secnumdepth + \let\@svsec\@empty\else + \refstepcounter{#1}\edef\@svsec{\csname the#1\endcsname. 
}\fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6\relax + \noindent{\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\par}% + \endgroup + \csname #1mark\endcsname{#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}\else + \def\@svsechd{#6\hskip #3\relax %% \relax added 2 May 90 + \@svsec #8\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}}\fi + \@xsect{#5}} +% +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + \begingroup #4\noindent{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + \else \def\@svsechd{#4\hskip #1\relax #5}\fi + \@xsect{#3}} +% LIST DEFINITIONS +\setlength\leftmargini {2em} +\leftmargin \leftmargini +\setlength\leftmarginii {2em} +\setlength\leftmarginiii {1.8em} +\setlength\leftmarginiv {1.6em} + \setlength\leftmarginv {1em} + \setlength\leftmarginvi {1em} +\setlength\leftmargin{\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\roman{enumi}} +\renewcommand\theenumii{\alph{enumii}} +\renewcommand\theenumiii{\arabic{enumiii}} +\renewcommand\theenumiv{\Alph{enumiv}} +\newcommand\labelenumi{(\theenumi)} +\newcommand\labelenumii{(\theenumii)} +\newcommand\labelenumiii{\theenumiii.} +\newcommand\labelenumiv{(\theenumiv)} +\renewcommand\p@enumii{(\theenumi)} +\renewcommand\p@enumiii{(\theenumi.\theenumii)} +\renewcommand\p@enumiv{(\theenumi.\theenumii.\theenumiii)} +\newcommand\labelitemi{$\m@th\bullet$} +\newcommand\labelitemii{\normalfont\bfseries --} +\newcommand\labelitemiii{$\m@th\ast$} +\newcommand\labelitemiv{$\m@th\cdot$} +\renewcommand \theequation {\@arabic\c@equation} + +%%%%%%%%%%%%% Figures +\newcounter{figure} +\renewcommand\thefigure{\@arabic\c@figure} 
+\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +\def\fnum@figure{\figurename~\thefigure} +\newenvironment{figure}{\footnotesize\rm\@float{figure}}% + {\end@float\normalsize\rm} +\newenvironment{figure*}{\footnotesize\rm\@dblfloat{figure}}{\end@dblfloat} +\newcounter{table} +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\tablename~\thetable} +\newenvironment{table}{\footnotesize\rm\@float{table}}% + {\end@float\normalsize\rm} +\newenvironment{table*}{\footnotesize\rm\@dblfloat{table}}% + {\end@dblfloat\normalsize\rm} +\newlength\abovecaptionskip +\newlength\belowcaptionskip +\setlength\abovecaptionskip{10\p@} +\setlength\belowcaptionskip{0\p@} +%Table Environments +%\newenvironment{tableref}[3][\textwidth]{% +%\begin{center}% +%\begin{table}% +%\captionsetup[table]{width=#1} +%\centering\caption{\label{#2}#3}}{\end{table}\end{center}} +%%%%%%%%%%%%%%%%% +%\newcounter{figure} +%\renewcommand \thefigure {\@arabic\c@figure} +%\def\fps@figure{tbp} +%\def\ftype@figure{1} +%\def\ext@figure{lof} +%\def\fnum@figure{\figurename~\thefigure} +%ENVIRONMENT: figure +%\newenvironment{figure} +% {\@float{figure}} +% {\end@float} +%ENVIRONMENT: figure* +%\newenvironment{figure*} +% {\@dblfloat{figure}} +% {\end@dblfloat} +%ENVIRONMENT: table +%\newcounter{table} +%\renewcommand\thetable{\@arabic\c@table} +%\def\fps@table{tbp} +%\def\ftype@table{2} +%\def\ext@table{lot} +%\def\fnum@table{\tablename~\thetable} +%\newenvironment{table} +% {\@float{table}} +% {\end@float} +%ENVIRONMENT: table* +%\newenvironment{table*} +% {\@dblfloat{table}} +% {\end@dblfloat} +%\newlength\abovecaptionskip +%\newlength\belowcaptionskip +%\setlength\abovecaptionskip{10\p@} +%\setlength\belowcaptionskip{0\p@} +% CAPTIONS +% Added redefinition of \@caption so captions are not written to +% aux file therefore less need to \protect fragile commands +% +\long\def\@caption#1[#2]#3{\par\begingroup + \@parboxrestore + 
\normalsize + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} +\long\def\@makecaption#1#2{% + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bf #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + {\bf #1.} #2\par + \else + \global \@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} +%\newcommand\@pnumwidth{1.55em} +%\newcommand\@tocrmarg{2.55em} +%\newcommand\@dotsep{4.5} +%\setcounter{tocdepth}{3} +%\newcommand\tableofcontents{% +% \section*{\contentsname +% \@mkboth{% +% \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% +% \@starttoc{toc}% +% } +%\newcommand*\l@part[2]{% +% \ifnum \c@tocdepth >-2\relax +% \addpenalty\@secpenalty +% \addvspace{2.25em \@plus\p@}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% {\leavevmode +% \large \bfseries #1\hfil \hb@xt@\@pnumwidth{\hss #2}}\par +% \nobreak +% \if@compatibility +% \global\@nobreaktrue +% \everypar{\global\@nobreakfalse\everypar{}}% +% \fi +% \endgroup +% \fi} +%\newcommand*\l@section[2]{% +% \ifnum \c@tocdepth >\z@ +% \addpenalty\@secpenalty +% \addvspace{1.0em \@plus\p@}% +% \setlength\@tempdima{1.5em}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% \leavevmode \bfseries +% \advance\leftskip\@tempdima +% \hskip -\leftskip +% #1\nobreak\hfil \nobreak\hb@xt@\@pnumwidth{\hss #2}\par +% \endgroup +% \fi} 
+%\newcommand*\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} +%\newcommand*\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} +%\newcommand*\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} +%\newcommand*\l@subparagraph{\@dottedtocline{5}{10em}{5em}} +%\newcommand\listoffigures{% +% \section*{\listfigurename +% \@mkboth{\MakeUppercase\listfigurename}% +% {\MakeUppercase\listfigurename}}% +% \@starttoc{lof}% +% } +%\newcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} +%\newcommand\listoftables{% +% \section*{\listtablename +% \@mkboth{% +% \MakeUppercase\listtablename}{\MakeUppercase\listtablename}}% +% \@starttoc{lot}% +% } +%\let\l@table\l@figure +%====================================== +%ENVIRONMENTS +%====================================== +%ENVIRONMENT: indented +\newenvironment{indented}{\begin{indented}}{\end{indented}} +\newenvironment{varindent}[1]{\begin{varindent}{#1}}{\end{varindent}} +% +\def\indented{\list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\mathindent\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endindented=\endlist +\def\varindent#1{\setlength{\varind}{#1}% + \list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\varind\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endvarindent=\endlist +%ENVIRONMENT: abstract +\newenvironment{abstract}{% + \vspace{16pt plus3pt minus3pt} + \begin{indented} + \item[]{\bfseries \abstractname.}\quad\rm\ignorespaces} + {\end{indented}\vspace{10mm}} +%ENVIRONMENT: description +\newenvironment{description} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} +%ENVIRONMENT: quotation +\newenvironment{quotation} + {\list{}{\listparindent 1.5em% + \itemindent \listparindent + \rightmargin \leftmargin + \parsep \z@ \@plus\p@}% + \item[]} + {\endlist} 
+%ENVIRONMENT: quote +\newenvironment{quote} + {\list{}{\rightmargin\leftmargin}% + \item[]} + {\endlist} +%ENVIRONMENT: verse +\newenvironment{verse} + {\let\\=\@centercr + \list{}{\itemsep \z@ + \itemindent -1.5em% + \listparindent\itemindent + \rightmargin \leftmargin + \advance\leftmargin 1.5em}% + \item[]} + {\endlist} +%ENVIRONMENT: bibliography +\newdimen\bibindent +\setlength\bibindent{1.5em} +\def\thebibliography#1{\list + {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@ + \labelsep=5\p@\itemindent=-10\p@ + \settowidth\labelwidth{\footnotesize[#1]}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \advance\leftmargin -\itemindent + \usecounter{enumi}}\footnotesize + \def\newblock{\ } + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist +\def\numrefs#1{\begin{thebibliography}{#1}} +\def\endnumrefs{\end{thebibliography}} +\let\endbib=\endnumrefs +%%%%%%%%%%%%%%%%%% + + +%\newenvironment{thebibliography}[1] +% {\section*{References} +% \list{\@biblabel{\@arabic\c@enumiv}}% +% {\settowidth\labelwidth{\@biblabel{#1}}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \@openbib@code +% \usecounter{enumiv}% +% \let\p@enumiv\@empty +% \renewcommand\theenumiv{\@arabic\c@enumiv}}% +% \sloppy +% \clubpenalty4000 +% \@clubpenalty \clubpenalty +% \widowpenalty4000% +% \sfcode`\.\@m} +% {\def\@noitemerr +% {\@latex@warning{Empty `thebibliography' environment}}% +% \endlist} +%\newcommand\newblock{\hskip .11em\@plus.33em\@minus.07em} +%\let\@openbib@code\@empty +%ENVIRONMENT: theindex +\newenvironment{theindex} + {\if@twocolumn + \@restonecolfalse + \else + \@restonecoltrue + \fi + \columnseprule \z@ + \columnsep 35\p@ + \twocolumn[\section*{\indexname}]% + \@mkboth{\MakeUppercase\indexname}% + {\MakeUppercase\indexname}% + \thispagestyle{plain}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\@idxitem} + {\if@restonecol\onecolumn\else\clearpage\fi} 
+\newcommand\@idxitem{\par\hangindent 40\p@} +\newcommand\subitem{\@idxitem \hspace*{20\p@}} +\newcommand\subsubitem{\@idxitem \hspace*{30\p@}} +\newcommand\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax} +%===================== +\def\appendix{\@ifnextchar*{\@appendixstar}{\@appendix}} +\def\@appendix{\eqnobysec\@appendixstar} +\def\@appendixstar{\@@par + \ifnumbysec % Added 30/4/94 to get Table A1, + \@addtoreset{table}{section} % Table B1 etc if numbering by + \@addtoreset{figure}{section}\fi % section + \setcounter{section}{0} + \setcounter{subsection}{0} + \setcounter{subsubsection}{0} + \setcounter{equation}{0} + \setcounter{figure}{0} + \setcounter{table}{0} + \def\thesection{Appendix \Alph{section}} + \def\theequation{\ifnumbysec + \Alph{section}.\arabic{equation}\else + \Alph{section}\arabic{equation}\fi} % Comment A\arabic{equation} maybe + \def\thetable{\ifnumbysec % better? 15/4/95 + \Alph{section}\arabic{table}\else + A\arabic{table}\fi} + \def\thefigure{\ifnumbysec + \Alph{section}\arabic{figure}\else + A\arabic{figure}\fi}} +\def\noappendix{\setcounter{figure}{0} + \setcounter{table}{0} + \def\thetable{\arabic{table}} + \def\thefigure{\arabic{figure}}} +\setlength\arraycolsep{5\p@} +\setlength\tabcolsep{6\p@} +\setlength\arrayrulewidth{.4\p@} +\setlength\doublerulesep{2\p@} +\setlength\tabbingsep{\labelsep} +\skip\@mpfootins = \skip\footins +\setlength\fboxsep{3\p@} +\setlength\fboxrule{.4\p@} +\renewcommand\theequation{\arabic{equation}} +% NAME OF STRUCTURES +\newcommand\contentsname{Contents} +\newcommand\listfigurename{List of Figures} +\newcommand\listtablename{List of Tables} +\newcommand\refname{References} +\newcommand\indexname{Index} +\newcommand\figurename{Figure} +\newcommand\tablename{Table} +\newcommand\partname{Part} +\newcommand\appendixname{Appendix} +\newcommand\abstractname{Abstract} +%Miscellaneous commands +\newcommand{\BibTeX}{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em + 
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} +\newcommand{\jpcsit}{{\bfseries\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\jpcs}{{\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\iopp}{IOP Publishing} +\newcommand{\cls}{{\upshape\selectfont\texttt{jpconf.cls}}} +\newcommand{\corg}{conference organizer} +\newcommand\today{\number\day\space\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\year} + \setlength\columnsep{10\p@} +\setlength\columnseprule{0\p@} +\newcommand{\Tables}{\clearpage\section*{Tables and table captions} +\def\fps@table{hp}\noappendix} +\newcommand{\Figures}{\clearpage\section*{Figure captions} + \def\fps@figure{hp}\noappendix} +% +\newcommand{\Figure}[1]{\begin{figure} + \caption{#1} + \end{figure}} +% +\newcommand{\Table}[1]{\begin{table} + \caption{#1} + \begin{indented} + \lineup + \item[]\begin{tabular}{@{}l*{15}{l}}} +\def\endTable{\end{tabular}\end{indented}\end{table}} +\let\endtab=\endTable +% +\newcommand{\fulltable}[1]{\begin{table} + \caption{#1} + \lineup + \begin{tabular*}{\textwidth}{@{}l*{15}{@{\extracolsep{0pt plus 12pt}}l}}} +\def\endfulltable{\end{tabular*}\end{table}} +%BIBLIOGRAPHY and References +%\newcommand{\Bibliography}[1]{\section*{References}\par\numrefs{#1}} +%\newcommand{\References}{\section*{References}\par\refs} +%\def\thebibliography#1{\list +% {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ +% \partopsep=0\p@\itemsep=0\p@ +% \labelsep=5\p@\itemindent=-10\p@ +% \settowidth\labelwidth{\footnotesize[#1]}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \advance\leftmargin -\itemindent +% \usecounter{enumi}}\footnotesize +% \def\newblock{\ } +% \sloppy\clubpenalty4000\widowpenalty4000 +% \sfcode`\.=1000\relax} +%\let\endthebibliography=\endlist +%\def\numrefs#1{\begin{thebibliography}{#1}} +%\def\endnumrefs{\end{thebibliography}} 
+%\let\endbib=\endnumrefs + +\def\thereferences{\list{}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@\labelsep=0\p@\itemindent=-18\p@ +\labelwidth=0\p@\leftmargin=18\p@ +}\footnotesize\rm +\def\newblock{\ } +\sloppy\clubpenalty4000\widowpenalty4000 +\sfcode`\.=1000\relax}% +\let\endthereferences=\endlist +% MISC EQUATRION STUFF +%\def\[{\relax\ifmmode\@badmath\else +% \begin{trivlist} +% \@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +% \item[]\leavevmode +% \hbox to\linewidth\bgroup$ \displaystyle +% \hskip\mathindent\bgroup\fi} +%\def\]{\relax\ifmmode \egroup $\hfil \egroup \end{trivlist}\else \@badmath \fi} +%\def\equation{\@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +%\refstepcounter{equation}\trivlist \item[]\leavevmode +% \hbox to\linewidth\bgroup $ \displaystyle +%\hskip\mathindent} +%\def\endequation{$\hfil \displaywidth\linewidth\@eqnnum\egroup \endtrivlist} +%\@namedef{equation*}{\[} +%\@namedef{endequation*}{\]} +%\def\eqnarray{\stepcounter{equation}\let\@currentlabel=\theequation +%\global\@eqnswtrue +%\global\@eqcnt\z@\tabskip\mathindent\let\\=\@eqncr +%\abovedisplayskip\topsep\ifvmode\advance\abovedisplayskip\partopsep\fi +%\belowdisplayskip\abovedisplayskip +%\belowdisplayshortskip\abovedisplayskip +%\abovedisplayshortskip\abovedisplayskip +%$$\halign to +%\linewidth\bgroup\@eqnsel$\displaystyle\tabskip\z@ +% {##{}}$&\global\@eqcnt\@ne $\displaystyle{{}##{}}$\hfil +% &\global\@eqcnt\tw@ $\displaystyle{{}##}$\hfil +% \tabskip\@centering&\llap{##}\tabskip\z@\cr} +%\def\endeqnarray{\@@eqncr\egroup +% \global\advance\c@equation\m@ne$$\global\@ignoretrue } +%\mathindent = 6pc +%% +%\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th +% \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil +% \crcr#1\crcr}}\,} +%% +%\def\eqalignno#1{\displ@y \tabskip\z@skip +% \halign to\displaywidth{\hspace{5pc}$\@lign\displaystyle{##}$% +% \tabskip\z@skip +% 
&$\@lign\displaystyle{{}##}$\hfill\tabskip\@centering +% &\llap{$\@lign\hbox{\rm##}$}\tabskip\z@skip\crcr +% #1\crcr}} +%% +\newif\ifnumbysec +\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi} +\def\eqnobysec{\numbysectrue\@addtoreset{equation}{section}} +\newcounter{eqnval} +\def\numparts{\addtocounter{equation}{1}% + \setcounter{eqnval}{\value{equation}}% + \setcounter{equation}{0}% + \def\theequation{\ifnumbysec + \arabic{section}.\arabic{eqnval}{\it\alph{equation}}% + \else\arabic{eqnval}{\it\alph{equation}}\fi}} +\def\endnumparts{\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi}% + \setcounter{equation}{\value{eqnval}}} +% +\def\cases#1{% + \left\{\,\vcenter{\def\\{\cr}\normalbaselines\openup1\jot\m@th% + \ialign{\strut$\displaystyle{##}\hfil$&\tqs + \rm##\hfil\crcr#1\crcr}}\right.}% +\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th + \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil + \crcr#1\crcr}}\,} +% OTHER USEFUL BITS +\newcommand{\e}{\mathrm{e}} +\newcommand{\rme}{\mathrm{e}} +\newcommand{\rmi}{\mathrm{i}} +\newcommand{\rmd}{\mathrm{d}} +\renewcommand{\qquad}{\hspace*{25pt}} +\newcommand{\tdot}[1]{\stackrel{\dots}{#1}} % Added 1/9/94 +\newcommand{\tqs}{\hspace*{25pt}} +\newcommand{\fl}{\hspace*{-\mathindent}} +\newcommand{\Tr}{\mathop{\mathrm{Tr}}\nolimits} +\newcommand{\tr}{\mathop{\mathrm{tr}}\nolimits} +\newcommand{\Or}{\mathord{\mathrm{O}}} %changed from \mathop 20/1/95 +\newcommand{\lshad}{[\![} +\newcommand{\rshad}{]\!]} +\newcommand{\case}[2]{{\textstyle\frac{#1}{#2}}} +\def\pt(#1){({\it #1\/})} +\newcommand{\dsty}{\displaystyle} +\newcommand{\tsty}{\textstyle} +\newcommand{\ssty}{\scriptstyle} +\newcommand{\sssty}{\scriptscriptstyle} +\def\lo#1{\llap{${}#1{}$}} +\def\eql{\llap{${}={}$}} +\def\lsim{\llap{${}\sim{}$}} +\def\lsimeq{\llap{${}\simeq{}$}} +\def\lequiv{\llap{${}\equiv{}$}} +% +\newcommand{\eref}[1]{(\ref{#1})} 
+%\newcommand{\eqref}[1]{Equation (\ref{#1})} +%\newcommand{\Eqref}[1]{Equation (\ref{#1})} +\newcommand{\sref}[1]{section~\ref{#1}} +\newcommand{\fref}[1]{figure~\ref{#1}} +\newcommand{\tref}[1]{table~\ref{#1}} +\newcommand{\Sref}[1]{Section~\ref{#1}} +\newcommand{\Fref}[1]{Figure~\ref{#1}} +\newcommand{\Tref}[1]{Table~\ref{#1}} +\newcommand{\opencircle}{\mbox{\Large$\circ\,$}} % moved Large outside maths +\newcommand{\opensquare}{\mbox{$\rlap{$\sqcap$}\sqcup$}} +\newcommand{\opentriangle}{\mbox{$\triangle$}} +\newcommand{\opentriangledown}{\mbox{$\bigtriangledown$}} +\newcommand{\opendiamond}{\mbox{$\diamondsuit$}} +\newcommand{\fullcircle}{\mbox{{\Large$\bullet\,$}}} % moved Large outside maths +\newcommand{\fullsquare}{\,\vrule height5pt depth0pt width5pt} +\newcommand{\dotted}{\protect\mbox{${\mathinner{\cdotp\cdotp\cdotp\cdotp\cdotp\cdotp}}$}} +\newcommand{\dashed}{\protect\mbox{-\; -\; -\; -}} +\newcommand{\broken}{\protect\mbox{-- -- --}} +\newcommand{\longbroken}{\protect\mbox{--- --- ---}} +\newcommand{\chain}{\protect\mbox{--- $\cdot$ ---}} +\newcommand{\dashddot}{\protect\mbox{--- $\cdot$ $\cdot$ ---}} +\newcommand{\full}{\protect\mbox{------}} + +\def\;{\protect\psemicolon} +\def\psemicolon{\relax\ifmmode\mskip\thickmuskip\else\kern .3333em\fi} +\def\lineup{\def\0{\hbox{\phantom{0}}}% + \def\m{\hbox{$\phantom{-}$}}% + \def\-{\llap{$-$}}} +% +%%%%%%%%%%%%%%%%%%%%% +% Tables rules % +%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\boldarrayrulewidth}{1\p@} +% Width of bold rule in tabular environment. 
+ +\def\bhline{\noalign{\ifnum0=`}\fi\hrule \@height +\boldarrayrulewidth \futurelet \@tempa\@xhline} + +\def\@xhline{\ifx\@tempa\hline\vskip \doublerulesep\fi + \ifnum0=`{\fi}} + +% +% Rules for tables with extra space around +% +\newcommand{\br}{\ms\bhline\ms} +\newcommand{\mr}{\ms\hline\ms} +% +\newcommand{\centre}[2]{\multispan{#1}{\hfill #2\hfill}} +\newcommand{\crule}[1]{\multispan{#1}{\hspace*{\tabcolsep}\hrulefill + \hspace*{\tabcolsep}}} +\newcommand{\fcrule}[1]{\ifnum\thetabtype=1\multispan{#1}{\hrulefill + \hspace*{\tabcolsep}}\else\multispan{#1}{\hrulefill}\fi} +% +% Extra spaces for tables and displayed equations +% +\newcommand{\ms}{\noalign{\vspace{3\p@ plus2\p@ minus1\p@}}} +\newcommand{\bs}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}} +\newcommand{\ns}{\noalign{\vspace{-3\p@ plus-1\p@ minus-1\p@}}} +\newcommand{\es}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}\displaystyle}% +% +\newcommand{\etal}{{\it et al\/}\ } +\newcommand{\dash}{------} +\newcommand{\nonum}{\par\item[]} %\par added 1/9/93 +\newcommand{\mat}[1]{\underline{\underline{#1}}} +% +% abbreviations for IOPP journals +% +\newcommand{\CQG}{{\it Class. Quantum Grav.} } +\newcommand{\CTM}{{\it Combust. Theory Modelling\/} } +\newcommand{\DSE}{{\it Distrib. Syst. Engng\/} } +\newcommand{\EJP}{{\it Eur. J. Phys.} } +\newcommand{\HPP}{{\it High Perform. Polym.} } % added 4/5/93 +\newcommand{\IP}{{\it Inverse Problems\/} } +\newcommand{\JHM}{{\it J. Hard Mater.} } % added 4/5/93 +\newcommand{\JO}{{\it J. Opt.} } +\newcommand{\JOA}{{\it J. Opt. A: Pure Appl. Opt.} } +\newcommand{\JOB}{{\it J. Opt. B: Quantum Semiclass. Opt.} } +\newcommand{\JPA}{{\it J. Phys. A: Math. Gen.} } +\newcommand{\JPB}{{\it J. Phys. B: At. Mol. Phys.} } %1968-87 +\newcommand{\jpb}{{\it J. Phys. B: At. Mol. Opt. Phys.} } %1988 and onwards +\newcommand{\JPC}{{\it J. Phys. C: Solid State Phys.} } %1968--1988 +\newcommand{\JPCM}{{\it J. Phys.: Condens. Matter\/} } %1989 and onwards +\newcommand{\JPD}{{\it J. Phys. D: Appl. 
Phys.} } +\newcommand{\JPE}{{\it J. Phys. E: Sci. Instrum.} } +\newcommand{\JPF}{{\it J. Phys. F: Met. Phys.} } +\newcommand{\JPG}{{\it J. Phys. G: Nucl. Phys.} } %1975--1988 +\newcommand{\jpg}{{\it J. Phys. G: Nucl. Part. Phys.} } %1989 and onwards +\newcommand{\MSMSE}{{\it Modelling Simulation Mater. Sci. Eng.} } +\newcommand{\MST}{{\it Meas. Sci. Technol.} } %1990 and onwards +\newcommand{\NET}{{\it Network: Comput. Neural Syst.} } +\newcommand{\NJP}{{\it New J. Phys.} } +\newcommand{\NL}{{\it Nonlinearity\/} } +\newcommand{\NT}{{\it Nanotechnology} } +\newcommand{\PAO}{{\it Pure Appl. Optics\/} } +\newcommand{\PM}{{\it Physiol. Meas.} } % added 4/5/93 +\newcommand{\PMB}{{\it Phys. Med. Biol.} } +\newcommand{\PPCF}{{\it Plasma Phys. Control. Fusion\/} } % added 4/5/93 +\newcommand{\PSST}{{\it Plasma Sources Sci. Technol.} } +\newcommand{\PUS}{{\it Public Understand. Sci.} } +\newcommand{\QO}{{\it Quantum Opt.} } +\newcommand{\QSO}{{\em Quantum Semiclass. Opt.} } +\newcommand{\RPP}{{\it Rep. Prog. Phys.} } +\newcommand{\SLC}{{\it Sov. Lightwave Commun.} } % added 4/5/93 +\newcommand{\SST}{{\it Semicond. Sci. Technol.} } +\newcommand{\SUST}{{\it Supercond. Sci. Technol.} } +\newcommand{\WRM}{{\it Waves Random Media\/} } +\newcommand{\JMM}{{\it J. Micromech. Microeng.\/} } +% +% Other commonly quoted journals +% +\newcommand{\AC}{{\it Acta Crystallogr.} } +\newcommand{\AM}{{\it Acta Metall.} } +\newcommand{\AP}{{\it Ann. Phys., Lpz.} } +\newcommand{\APNY}{{\it Ann. Phys., NY\/} } +\newcommand{\APP}{{\it Ann. Phys., Paris\/} } +\newcommand{\CJP}{{\it Can. J. Phys.} } +\newcommand{\JAP}{{\it J. Appl. Phys.} } +\newcommand{\JCP}{{\it J. Chem. Phys.} } +\newcommand{\JJAP}{{\it Japan. J. Appl. Phys.} } +\newcommand{\JP}{{\it J. Physique\/} } +\newcommand{\JPhCh}{{\it J. Phys. Chem.} } +\newcommand{\JMMM}{{\it J. Magn. Magn. Mater.} } +\newcommand{\JMP}{{\it J. Math. Phys.} } +\newcommand{\JOSA}{{\it J. Opt. Soc. Am.} } +\newcommand{\JPSJ}{{\it J. Phys. Soc. 
Japan\/} } +\newcommand{\JQSRT}{{\it J. Quant. Spectrosc. Radiat. Transfer\/} } +\newcommand{\NC}{{\it Nuovo Cimento\/} } +\newcommand{\NIM}{{\it Nucl. Instrum. Methods\/} } +\newcommand{\NP}{{\it Nucl. Phys.} } +\newcommand{\PL}{{\it Phys. Lett.} } +\newcommand{\PR}{{\it Phys. Rev.} } +\newcommand{\PRL}{{\it Phys. Rev. Lett.} } +\newcommand{\PRS}{{\it Proc. R. Soc.} } +\newcommand{\PS}{{\it Phys. Scr.} } +\newcommand{\PSS}{{\it Phys. Status Solidi\/} } +\newcommand{\PTRS}{{\it Phil. Trans. R. Soc.} } +\newcommand{\RMP}{{\it Rev. Mod. Phys.} } +\newcommand{\RSI}{{\it Rev. Sci. Instrum.} } +\newcommand{\SSC}{{\it Solid State Commun.} } +\newcommand{\ZP}{{\it Z. Phys.} } +%=================== +\pagestyle{headings} +\pagenumbering{arabic} +\raggedbottom +\onecolumn +\endinput +%% +%% End of file `jconf.cls'. diff --git a/contributions/sd_iam/jpconf11.clo b/contributions/sd_iam/jpconf11.clo new file mode 100644 index 0000000000000000000000000000000000000000..63541cbb98638b86bbc1df2d09f4eafbe3233a42 --- /dev/null +++ b/contributions/sd_iam/jpconf11.clo @@ -0,0 +1,141 @@ +%% +%% This is file `jpconf11.clo' +%% +%% This file is distributed in the hope that it will be useful, +%% but WITHOUT ANY WARRANTY; without even the implied warranty of +%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +\ProvidesFile{jpconf11.clo}[2005/05/04 v1.0 LaTeX2e file (size option)] +\renewcommand\normalsize{% + \@setfontsize\normalsize\@xipt{13}% + \abovedisplayskip 12\p@ \@plus3\p@ \@minus7\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \belowdisplayskip \abovedisplayskip + \let\@listi\@listI} +\normalsize +\newcommand\small{% + \@setfontsize\small\@xpt{12}% + \abovedisplayskip 11\p@ \@plus3\p@ \@minus6\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 9\p@ \@plus3\p@ \@minus5\p@ + \parsep 4.5\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip} +\newcommand\footnotesize{% +% \@setfontsize\footnotesize\@xpt\@xiipt + \@setfontsize\footnotesize\@ixpt{11}% + \abovedisplayskip 10\p@ \@plus2\p@ \@minus5\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6\p@ \@plus3\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 6\p@ \@plus2\p@ \@minus2\p@ + \parsep 3\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip +} +\newcommand\scriptsize{\@setfontsize\scriptsize\@viiipt{9.5}} +\newcommand\tiny{\@setfontsize\tiny\@vipt\@viipt} +\newcommand\large{\@setfontsize\large\@xivpt{18}} +\newcommand\Large{\@setfontsize\Large\@xviipt{22}} +\newcommand\LARGE{\@setfontsize\LARGE\@xxpt{25}} +\newcommand\huge{\@setfontsize\huge\@xxvpt{30}} +\let\Huge=\huge +\if@twocolumn + \setlength\parindent{14\p@} + \else + \setlength\parindent{18\p@} +\fi +\if@letterpaper% +%\input{letmarg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} 
+\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\else +%\input{a4marg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} +\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\fi +\setlength\maxdepth{.5\topskip} +\setlength\@maxdepth\maxdepth +\setlength\footnotesep{8.4\p@} +\setlength{\skip\footins} {10.8\p@ \@plus 4\p@ \@minus 2\p@} +\setlength\floatsep {14\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\textfloatsep {24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {16\p@ \@plus 4\p@ \@minus 4\p@} +\setlength\dblfloatsep {16\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\dbltextfloatsep{24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\@fptop{0\p@} +\setlength\@fpsep{10\p@ \@plus 1fil} +\setlength\@fpbot{0\p@} +\setlength\@dblfptop{0\p@} +\setlength\@dblfpsep{10\p@ \@plus 1fil} +\setlength\@dblfpbot{0\p@} +\setlength\partopsep{3\p@ \@plus 2\p@ \@minus 2\p@} +\def\@listI{\leftmargin\leftmargini + \parsep=\z@ + \topsep=6\p@ \@plus3\p@ \@minus3\p@ + \itemsep=3\p@ \@plus2\p@ \@minus1\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep + \topsep=3\p@ \@plus2\p@ \@minus\p@ + \parsep=\z@ + \itemsep=\parsep} +\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep=\z@ + \parsep=\z@ + \partopsep=\z@ + \itemsep=\z@} +\def\@listiv {\leftmargin\leftmarginiv + \labelwidth\leftmarginiv + \advance\labelwidth-\labelsep} +\def\@listv{\leftmargin\leftmarginv + \labelwidth\leftmarginv + \advance\labelwidth-\labelsep} +\def\@listvi {\leftmargin\leftmarginvi + 
\labelwidth\leftmarginvi + \advance\labelwidth-\labelsep} +\endinput +%% +%% End of file `iopart12.clo'. diff --git a/contributions/sd_iam/main.tex b/contributions/sd_iam/main.tex new file mode 100644 index 0000000000000000000000000000000000000000..c043856c46469d6a87b1a6132368c482e2baae30 --- /dev/null +++ b/contributions/sd_iam/main.tex @@ -0,0 +1,157 @@ +\documentclass[a4paper]{jpconf} + +\usepackage{url} +\usepackage{graphicx} +\usepackage{float} + +\newcommand{\quotes}[1]{``#1''} + +\begin{document} + +\title{Evolving the INDIGO IAM service} + +\author{ + A.~Ceccanti$^1$, + E.~Vianello$^1$ +} + +\address{$^1$ INFN-CNAF, Bologna, IT} + +\ead{ + andrea.ceccanti@cnaf.infn.it, + enrico.vianello@cnaf.infn.it +} + +\begin{abstract} + + The INDIGO Identity and Access Management (IAM) service has been designed and + developed at CNAF in the context of the INDIGO Datacloud project. In this + contribution, we describe the work done in 2018 to evolve and operate the IAM + service in support of several scientific communities and use cases. + +\end{abstract} + +\section*{Introduction} +\label{sec:introduction} + +The INDIGO IAM service provides an integrated solution for securing access to +an organization resources and services. It supports authentication via Identity +federations (e.g., EduGAIN \cite{edugain}) and social logins (i.e., Google +\cite{google}), a registration service providing moderated access to the +organization, delegation and provisioning APIs and flexible account linking. + +During 2018, the main focus of the work on IAM was to enhance its functionalities +in order to fully support the requirements emerging from the +WLCG Authorization Working Group, in support of the design of the future WLCG Authorization +service~\cite{wlcg-authz-wg}. + +The following paragraphs summarize the main development and maintenance +activities. 
+
+\section*{Support for multiple external OpenID Connect providers}
+
+\begin{figure}
+  \begin{minipage}[b]{.45\textwidth}
+    \centering
+    \includegraphics[width=\textwidth]{mutliple-oidc.png}
+    \caption{\label{fig:oidc-providers}The DODAS IAM login page showcasing support for Google, EduGAIN and EGI CheckIn external authentication.}
+  \end{minipage}
+  \hspace{.1\textwidth}
+  \begin{minipage}[b]{.45\textwidth}
+    \centering
+    \includegraphics[width=\textwidth]{request-cert.png}
+    \caption{\label{fig:request-cert}Requesting the on-demand generation of an X.509 certificate from the IAM dashboard.}
+  \end{minipage}
+\end{figure}
+
+Up to IAM version 1.4.0, IAM supported a single OpenID Connect provider, Google.
+The support for authentication and account linking with an external OpenID Connect provider has been extended to
+allow multiple providers.
+Each provider can be listed in the IAM login page, and login button text and appearance can be customized with
+appropriate configuration (see Figure~\ref{fig:oidc-providers}).
+
+\section*{RCAuth.eu integration}
+
+The RCauth.eu~\cite{rcauth} Pilot Certificate Authority (CA) is an online CA operated by NIKHEF~\cite{nikhef} which
+issues certificates to end-entities based on a successful authentication to a Federated Identity Management System
+(FIMS) operated by an eligible Registration Authority.
+The certificates issued by the RCauth Pilot CA are valid for a period of at most 13 months, but may be as short as 11 days.
+
+RCAuth.eu has been integrated with INDIGO IAM in order to provide on-demand X.509 certificates to users without a certificate.
+The certificate is obtained using a simple OAuth-based protocol~\cite{oauth4myproxy}.
+
+When the RCAuth.eu integration is enabled, IAM provides users with the ability to request a certificate on-demand from the IAM dashboard, as shown in Figure~\ref{fig:request-cert}.
+
+What happens under the hood is that the user is redirected to the RCAuth.eu
+instance to be authenticated and give consent to the generation of an X.509 certificate and that such certificate is accessible by IAM.
+Once the user has given their consent, IAM fetches the generated certificate from RCAuth.eu and
+creates a proxy certificate out of it that is then stored in the IAM database and linked to the user membership.
+
+A certificate provisioning API has also been implemented that allows users/agents with the appropriate privileges to
+obtain the proxy certificate stored in the IAM database.
+
+\section*{CERN HR DB API service and integration}
+
+Identity vetting for the LHC VOs deployed at CERN relies on the VOMS Admin~\cite{voms-admin} CERN Human Resource database
+integration~\cite{voms-convergence}, in order to verify that a VOMS applicant has a
+valid LHC experiment membership while registered in VOMS.
+
+In order to expose HR database identity vetting also to IAM, the logic
+of the HR database querying has been extracted from the VOMS Admin codebase and adapted to schema changes planned for GDPR compliance.
+
+A Spring Boot microservice has been developed to provide a convenient REST API to query information about LHC experiment membership~\cite{hr-db-api-service}. This microservice has been deployed at CERN and integrated in IAM to demonstrate identity-vetting based on HR information, supporting a registration flow similar to the one implemented in production by VOMS Admin and that would satisfy the requirements expressed by the WLCG authorization working group.
+
+\section*{VOMS provisioning}
+
+A VOMS~\cite{VOMS} Attribute Authority (AA) microservice has been developed to expose IAM VO membership attributes in the form of VOMS attribute certificates. The VOMS microservice talks to the IAM DB and leverages IAM support for X.509 authentication.
+The service is compatible with existing VOMS clients.
+
+Since IAM does not provide a role abstraction, and since VOMS roles are
+equivalent to group membership asserted on request, a mechanism based on labels has been developed to flag some IAM groups as VOMS roles.
+These groups are not automatically included in generated VOMS ACs, but are instead returned using the VOMS role syntax only on explicit request from the client, preserving the original VOMS role semantics.
+
+With this work, IAM can support a gradual and seamless migration from WLCG legacy AAI based on X.509 and VOMS to a token-based AAI.
+
+\section*{Labels and Attribute API}
+
+A generic Labels API, inspired by the Kubernetes~\cite{kubernetes-labels} labels API, has been introduced in IAM that allows privileged users/agents to attach labels to groups and users.
+
+These labels can be used internally by IAM (e.g., to provide additional metadata about users/groups status, to implement VOMS role semantics on top of IAM groups) or by external applications. A URI-based namespace mechanism
+is supported to avoid name clashes on attributes managed by different applications.
+
+A generic Attribute API has been introduced to allow linking key-value pairs to users and groups. This information is meant to provide additional authentication/authorization information related to users and groups that can be included, if requested by the configuration, in tokens issued by IAM,
+providing a mechanism very similar to VOMS generic attributes.
+
+\section*{Flexible notification dispatching}
+
+A more flexible notification dispatching mechanism has been added to IAM,
+which provides the ability to target VO users, administrators or group
+administrators individually for email notifications. Previously, and up to IAM version 1.4.0, all administrator-targeted notifications were dispatched to an email address (typically, a mailing list) provided in configuration.
+
+\section*{Group managers and group request support}
+
+IAM now supports group managers, which are privileged users that can approve
+group membership requests or add users to the managed group.
+Users can now request to join a group from the IAM dashboard home page.
+
+\section*{Improved SAML support}
+
+Significant work has been put into improving SAML support and integration
+with identity federations such as EduGAIN or SAML identity providers (e.g., the CERN Single Sign-On~\cite{cern-sso} and the Italian Sistema Pubblico per l'Identit\`a digitale (SPID)~\cite{spid}).
+
+\section*{Operations and support}
+
+Several IAM instances have been deployed and operated on our Kubernetes~\cite{kubernetes} infrastructure in support of scientific communities and projects (DODAS, CHNet, Dariah, Deep Hybrid Datacloud, ICCU, Virgo).
+
+An IAM instance dedicated to WLCG authorization WG demonstration and integration activities has been deployed on the CERN Openshift~\cite{cern-openshift} and Openstack~\cite{openstack} infrastructures.
+
+\section*{Conclusions and future work}
+
+We have described the main development and maintenance activities performed on the IAM service during 2018. In the future we will focus on further
+enhancements to the service and on the migration of the core authentication module to Keycloak~\cite{keycloak}, in order to reduce maintenance costs and improve IAM integration flexibility.
+ +\bibliographystyle{iopart-num} +\section*{References} +\bibliography{biblio} + +\end{document} diff --git a/contributions/sd_iam/mutliple-oidc.png b/contributions/sd_iam/mutliple-oidc.png new file mode 100644 index 0000000000000000000000000000000000000000..540c26acd493cea6b3adbf3d687afadaa65e1910 Binary files /dev/null and b/contributions/sd_iam/mutliple-oidc.png differ diff --git a/contributions/sd_iam/request-cert.png b/contributions/sd_iam/request-cert.png new file mode 100644 index 0000000000000000000000000000000000000000..fd3a154f2e9a0b6058c31ac3007831a8235b0b76 Binary files /dev/null and b/contributions/sd_iam/request-cert.png differ diff --git a/contributions/sd_nginx_voms/biblio.bib b/contributions/sd_nginx_voms/biblio.bib new file mode 100644 index 0000000000000000000000000000000000000000..7bee7b5f6a52ea0d2bdf8e14607afa2743a70820 --- /dev/null +++ b/contributions/sd_nginx_voms/biblio.bib @@ -0,0 +1,101 @@ +@misc{rfc5280, + series = {Request for Comments}, + number = 5280, + howpublished = {RFC 5280}, + publisher = {RFC Editor}, + doi = {10.17487/RFC5280}, + url = {https://rfc-editor.org/rfc/rfc5280.txt}, + author = {Sharon Boeyen and Stefan Santesson and Tim Polk and Russ Housley and Stephen Farrell and Dave Cooper}, + title = {{Internet X.509 Public Key Infrastructure Certificate and Certificate Revocation List (CRL) Profile}}, + pagetotal = 151, + year = 2008, + month = may, + abstract = {This memo profiles the X.509 v3 certificate and X.509 v2 certificate revocation list (CRL) for use in the Internet. An overview of this approach and model is provided as an introduction. The X.509 v3 certificate format is described in detail, with additional information regarding the format and semantics of Internet name forms. Standard certificate extensions are described and two Internet-specific extensions are defined. A set of required certificate extensions is specified. 
The X.509 v2 CRL format is described in detail along with standard and Internet-specific extensions. An algorithm for X.509 certification path validation is described. An ASN.1 module and examples are provided in the appendices. {[}STANDARDS-TRACK{]}}, +} + +@misc{rfc3820, + series = {Request for Comments}, + number = 3820, + howpublished = {RFC 3820}, + publisher = {RFC Editor}, + doi = {10.17487/RFC3820}, + url = {https://rfc-editor.org/rfc/rfc3820.txt}, + author = {Von Welch and Mary Thompson and Douglas E. Engert and Steven Tuecke and Laura Pearlman}, + title = {{Internet X.509 Public Key Infrastructure (PKI) Proxy Certificate Profile}}, + pagetotal = 37, + year = 2004, + month = jun, + abstract = {This document forms a certificate profile for Proxy Certificates, based on X.509 Public Key Infrastructure (PKI) certificates as defined in RFC 3280, for use in the Internet. The term Proxy Certificate is used to describe a certificate that is derived from, and signed by, a normal X.509 Public Key End Entity Certificate or by another Proxy Certificate for the purpose of providing restricted proxying and delegation within a PKI based authentication system. {[}STANDARDS-TRACK{]}}, +} + +@misc{rfc5755, + series = {Request for Comments}, + number = 5755, + howpublished = {RFC 5755}, + publisher = {RFC Editor}, + doi = {10.17487/RFC5755}, + url = {https://rfc-editor.org/rfc/rfc5755.txt}, + author = {Sean Turner and Stephen Farrell and Russ Housley}, + title = {{An Internet Attribute Certificate Profile for Authorization}}, + pagetotal = 50, + year = 2010, + month = jan, + abstract = {This specification defines a profile for the use of X.509 Attribute Certificates in Internet Protocols. Attribute certificates may be used in a wide range of applications and environments covering a broad spectrum of interoperability goals and a broader spectrum of operational and assurance requirements. 
The goal of this document is to establish a common baseline for generic applications requiring broad interoperability as well as limited special purpose requirements. The profile places emphasis on attribute certificate support for Internet electronic mail, IPsec, and WWW security applications. This document obsoletes RFC 3281. {[}STANDARDS-TRACK{]}}, +} + +@misc{nginxvoms-cnafar-2016, + author = {Paulon, S and Giacomini, F and Ceccanti, A}, + title = {{A VOMS module for the NGINX web server}}, + howpublished = {{INFN-CNAF Annual Report}}, + year = 2016, + url = {https://www.cnaf.infn.it/annual-report}, +} + +@InProceedings{voms, +author="Alfieri, R. +and Cecchini, R. +and Ciaschini, V. +and dell'Agnello, L. +and Frohner, {\'A}. +and Gianoli, A. +and L{\~o}rentey, K. +and Spataro, F.", +editor="Fern{\'a}ndez Rivera, Francisco +and Bubak, Marian +and G{\'o}mez Tato, Andr{\'e}s +and Doallo, Ram{\'o}n", +title="VOMS, an Authorization System for Virtual Organizations", +booktitle="Grid Computing", +year=2004, +publisher="Springer Berlin Heidelberg", +address="Berlin, Heidelberg", +pages="33--40", +abstract="We briefly describe the authorization requirements, focusing on the framework of the DataGrid and DataTAG Projects and illustrate the architecture of a new service we have developed, the Virtual Organization Membership Service (VOMS), to manage authorization information in Virtual Organization scope.", +isbn="978-3-540-24689-3" +} + +@Misc{module-baltig, + title = {\verb"ngx_http_voms_module"}, + note = {\url{https://baltig.infn.it/storm2/ngx\_http\_voms\_module}}} + +@Misc{nginx, + title = {Nginx}, + note = {\url{https://nginx.org/}} +} + +@Misc{openresty, + title = {{OpenResty}}, + note = {\url{https://openresty.org/}}} + +@Misc{test::nginx, + title = {{Test::Nginx}}, + note = {\url{https://metacpan.org/pod/Test::Nginx}}} + +@Misc{voms-github, + title = {{VOMS}}, + note = {\url{https://github.com/italiangrid/voms}}} + +@Misc{openssl, + title = {{OpenSSL}}, + note 
= {\url{https://www.openssl.org/}}} + diff --git a/contributions/sd_nginx_voms/deployment.png b/contributions/sd_nginx_voms/deployment.png new file mode 100644 index 0000000000000000000000000000000000000000..05ae03e7dac34b11b3033963cd5761177738abdd Binary files /dev/null and b/contributions/sd_nginx_voms/deployment.png differ diff --git a/contributions/sd_nginx_voms/main.tex b/contributions/sd_nginx_voms/main.tex new file mode 100644 index 0000000000000000000000000000000000000000..e3d5cbb8e7fcfeb33a716f719c20fe5b0dd60cc6 --- /dev/null +++ b/contributions/sd_nginx_voms/main.tex @@ -0,0 +1,244 @@ +\documentclass[a4paper]{jpconf} +\usepackage[T1]{fontenc} +\usepackage[utf8]{inputenc} +\usepackage{graphicx} + +\bibliographystyle{iopart-num} + +\begin{document} + +\title{A VOMS module for the Nginx web server} +\author{A. Ceccanti$^1$, F. Giacomini$^1$, E. Ronchieri$^1$, N. Terranova$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} +\ead{francesco.giacomini@cnaf.infn.it} + +\begin{abstract} We present the motivation, the design and some + implementation hints of a software module for the Nginx web server + aimed at extracting the attributes of a VOMS-issued Attribute + Certificate during a client authentication based on an X.509 Proxy + Certificate. The module enables the use of Nginx as a reverse proxy + to a Grid service, relieving it from most of the work necessary to + authenticate a client. +\end{abstract} + +\section{Introduction} + +Every Grid service needs to authenticate and possibly authorize every +request that it receives. The authentication is historically based on +X.509 Proxy Certificates~\cite{rfc5280}, extended with Attribute +Certificates~\cite{rfc5755} obtained from a VOMS +service~\cite{voms}. An Attribute Certificate is conceptually an +assertion signed by the VOMS service that declares the groups the user +submitting the request belongs to within a Virtual Organization +(e.g. 
a scientific collaboration) and the possible roles they have +within those groups. + +As a consequence, a Grid service must carry out a number of +security-related steps before even starting its own business logic: +\begin{itemize} +\item offer an HTTPS endpoint; +\item perform X.509 certificate-based client authentication; +\item extract the VOMS attributes, on which it could later base an + authorization decision. +\end{itemize} + +The purpose of this work is to factor those three actions out of a +Grid service into a common service-independent module to be run by a +reverse proxy deployed in front of the service. The ideal deployment +model is shown in Figure~\ref{fig:deployment}. If the reverse proxy +and the actual service run in a trusted zone, the communication +between the two can even happen over plain HTTP. + +\begin{figure} + \begin{center} + \includegraphics[width=.9\textwidth]{deployment} + \caption{\label{fig:deployment}Deployment model enabled by the + VOMS module run in a front-end service acting as a reverse proxy + towards the actual service running as the back-end.} + \end{center} +\end{figure} + +After a first prototype~\cite{nginxvoms-cnafar-2016} was prepared as +part of a master thesis, a properly engineered version has been +developed for production use. + +\section{Nginx} + +The reverse proxy of choice is Nginx~\cite{nginx}. Nginx is an efficient HTTP and +reverse proxy server, a mail proxy server, and a generic TCP/UDP proxy +server, which serves or proxies a large fraction of the busiest +sites on the Internet. + +Nginx has a modular architecture: the core software components provide +a solid and efficient foundation to dispatch events (e.g. from +networking) to one or more worker processes to be asynchronously +managed. Additional functionality is further provided by +\textit{modules} that are linked to the Nginx executable. Modules can +also be loaded dynamically at run time, yet they need to be known at +compile time. 
As a consequence, the typical way to obtain an Nginx +executable is to compile the corresponding source code, specifying the +exact selection of modules needed for the specific deployment. + +To simplify our build and testing setup, we have opted to base our +work on top of the OpenResty~\cite{openresty} distribution of Nginx, +which includes already a selection of useful modules. + +However we cannot use the Nginx code as-is, for it is unaware of the +delegation mechanism designed by Globus for the Grid Security +Infrastructure on which all Grid services are based. The delegation +mechanism introduces a control character in the HTTP protocol, between +the SSL/TLS handshake and the transmission of the method. Although we +do not intend to support the delegation mechanism in this work, we +wrote a minimal patch to the Nginx request-parsing code, so that it +can accept that extra character and ignore it if it means ``no +delegation''. + +An additional patch has been proposed for Nginx upstream, to enable +the support for proxy certificates via a flag in the configuration +file, after recent versions of OpenSSL~\cite{openssl} have removed the +possibility to enable it via an environment variable. Since proxy +certificates are not widely used outside of the Grid world, the patch +has not been accepted; despite being a more convenient and clearer +solution, the patch is not even applied locally, since there is a +workaround applicable directly in our module's code, as described +below. + +An instance of Nginx can be configured (and re-configured) through a +file read by the master process. The configuration file can specify, +among many other things, that Nginx runs in reverse-proxy mode and how +the request should be passed to the upstream server (i.e. the +back-end). 
The syntax of the configuration file allows for the use of +\textit{variables} (in our case, for example, there would be one +called \texttt{\$voms\_fqans}), whose value is provided by a specific +handler that gets executed when the corresponding variable is used. + +\section{Building and testing} + +Though the development can happen on any platform, the reference +platform for the deployment at the moment is CentOS 7. Moreover, as +mentioned above, we have elected the OpenResty distribution as the +basis for our own Nginx module. A Docker image combining CentOs 7 and +OpenResty is available both for local builds and for the continuous +integration builds bound to the \textit{git} repository that hosts the +source code. + +The Docker image is augmented with the necessary dependencies (such as +the VOMS libraries) and with the elected development tools, mostly +installed through the software collections \textit{Developer Toolset + 7} (to have a compiler that supports C++ 2017 and various +sanitizers) and \textit{LLVM Toolset 7} (to have the code formatter +and the static analyzer). + +The testing is based on \texttt{Test::Nginx}~\cite{test::nginx}, a +Perl-based testing framework that comes with OpenResty, that allows +the specification of tests with a declarative syntax. We struggled to +find a way to enable TLS client authentication, but the result is very +satisfactory. + +The build and the tests are automatically run every time some code is +pushed to the reference git repository~\cite{module-baltig}. + +\section{The VOMS module} + +The purpose of the VOMS module is to extract the information available +in the Attribute Certificate embedded in the X.509 Proxy Certificate +used for the authentication of the client and make it available as +Nginx variables. The variables can then be used in the configuration +file to form the request that is passed upstream by the reverse proxy. 
+ +The variables correspond very closely to the fields of the +\texttt{voms} data structure found in the API of the VOMS C++ +library~\cite{voms-github}. For example: + +\begin{description} +\item[\texttt{voms\_user}] The Subject of the End-Entity certificate, used to sign the proxy. +\item[\texttt{voms\_fqans}] A comma-separated list of Fully Qualified Attribute + Names. +\item[\texttt{voms\_vo}] The name of the Virtual Organization (VO) to which the End Entity belongs. +\item[\texttt{voms\_not\_before}] The date before which the Attribute + Certificate is not yet valid. +\item[\texttt{voms\_not\_after}] The date after which the Attribute Certificate + is not valid anymore. +\end{description} + +The module consists mainly of the handlers that are called when a +variable is referenced in the configuration file. The information +needed to give a value to the variables comes from invocations of the +VOMS library API; that information is obtained as a by-product of the +validation of the X.509 certificate chain presented by the client, +including the VOMS Attribute Certificate. Since such a validation is +expensive, performing it every time a handler is called is best +avoided and a caching strategy is preferable. The caching can be +applied at multiple levels: for each request, for each connection +(multiple requests can be sent over the same connection), for multiple +connections authenticated with the same client proxy certificate. At +the moment the caching is applied at connection level, but moving it +to the next level is already planned. + +As mentioned above, the module also enables the support for proxy +certificates in OpenSSL, which in recent versions of the library is +not available any more through setting an environment variable. 
This +is done calling the appropriate OpenSSL API functions in a handler +that gets executed at the end of the Nginx configuration phase, at a +time when the SSL certificate store, the data structure containing the +flag that enables the acceptance of proxies, is initialized and +available for manipulation. + +\section{Deployment} + +Once the module described in this work is linked to the Nginx +executable, in order to deploy a Grid service according to the model +sketched in the Introduction, the typical configuration for an Nginx +instance used as a reverse proxy in front of the actual Grid service +would include directives similar to the following. + +{\small +\begin{verbatim} + +server { + + listen 443 ssl; + ssl_certificate /certs/cert.pem; + ssl_certificate_key /certs/key.pem; + ssl_client_certificate /etc/pki/tls/certs/ca-bundle.crt; + ssl_verify_client on; + ssl_verify_depth 100; + + location / { + + proxy_set_header Voms-User $voms_user; + proxy_set_header Voms-Fqans $voms_fqans; + proxy_set_header Voms-Vo $voms_vo; + proxy_set_header Voms-Not-Before $voms_not_before; + proxy_set_header Voms-Not-After $voms_not_after; + + proxy_pass http://back-end; + } +} + +\end{verbatim} +} +The service running on the back-end would then receive requests over +plain HTTP and will find among its headers all the VOMS information +needed to apply its own authorization policies. + +\section{Conclusions and future work} + +In this paper we have shown how an Nginx reverse proxy equipped with +the described module could relieve a Grid service from most of the +work necessary to authenticate a client presenting credentials based +on X.509 Proxy Certificates extended with a VOMS-issued Attribute +Certificate. + +The module is ready for production use and is currently part of the +development effort aimed at revising the implementation of the StoRM +service. 
+ +The main planned development concerns the improvement of the caching +of the information obtained during the validation of VOMS Attribute +Certificates. + +\section*{References} +\bibliography{biblio} + +\end{document} diff --git a/contributions/sd_storm/biblio.bib b/contributions/sd_storm/biblio.bib new file mode 100644 index 0000000000000000000000000000000000000000..b5095fd8f0d8d0870c8738d3ce336fb9344908a1 --- /dev/null +++ b/contributions/sd_storm/biblio.bib @@ -0,0 +1,529 @@ +@techreport{jwt, + author = {Michael B. Jones and John Bradley and Nat Sakimura}, + title = {{The JSON Web Token RFC}}, + type = {RFC}, + number = 7519, + year = {2015}, + month = {May}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc7519.txt} +} + +@techreport{oauth, + author = {Dick Hardt}, + title = {{The OAuth 2.0 Authorization Framework}}, + type = {RFC}, + number = 6749, + year = {2012}, + month = {October}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc6749.txt} +} + +@techreport{oauth-token-exchange, + author = {Michael B. Jones and Anthony Nadalin and Brian Campbell + and John Bradley and Chuck Mortimore}, + title = {{OAuth 2.0 Token Exchange}}, + type = {Internet-Draft}, + number = "draft-ietf-oauth-token-exchange-16.txt", + year = {2019}, + month = {April}, + day = {22}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/id/draft-ietf-oauth-token-exchange-16.txt} +} + +@techreport{oauth-metadata, + author = {Michael B. 
Jones and Nat Sakimura and John Bradley}, + title = {{OAuth 2.0 Authorization Server Metadata}}, + type = {RFC}, + number = 8414, + year = {2018}, + month = {June}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc8414.txt} +} + +@online{oidc, + author = {{OpenID Foundation}}, + title = {{The OpenID Connect identity layer}}, + year = 2018, + url = {https://openid.net/connect/}, + urldate = {2018-12-03} +} + +@online{oidc-discovery, + author = {{Nat Sakimura and John Bradley and Michael B. Jones and Edmund Jay}}, + title = {{The OpenID Connect discovery specification}}, + year = 2014, + url = {https://openid.net/specs/openid-connect-discovery-1_0.html}, + urldate = {2018-12-03} +} + +@online{igtf, + title = {{The Interoperable Global Trust Federation}}, + url = {https://www.igtf.net/}, + urldate = {2018-12-03} +} + +@online{x509, + title = {{X.509}}, + url = {https://en.wikipedia.org/wiki/X.509}, + urldate = {2018-12-03} +} + +@article{GSI, + author = {Von Welch and + Frank Siebenlist and + Ian T. 
Foster and + John Bresnahan and + Karl Czajkowski and + Jarek Gawor and + Carl Kesselman and + Sam Meder and + Laura Pearlman and + Steven Tuecke}, + title = {Security for Grid Services}, + journal = {CoRR}, + volume = {cs.CR/0306129}, + year = {2003}, + url = {http://arxiv.org/abs/cs.CR/0306129}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/cs-CR-0306129}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@software{VOMS, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The Virtual Organisation Membership Service}}, + doi = {10.5281/zenodo.1875371}, + url = {https://doi.org/10.5281/zenodo.1875371} +} + +@online{edugain, + title = {{eduGAIN interfederation website}}, + url = {http://www.geant.org/Services/Trust_identity_and_security/eduGAIN}, + urldate = {2018-12-03} +} + +@online{google, + title = {{The Google Identity Platform}}, + url = {https://developers.google.com/identity/}, + urldate = {2018-12-03} +} + +@online{scim, + title = {{The System for Cross Domain Identity Management website}}, + url = {http://www.simplecloud.info/}, + urldate = {2018-12-03} +} + +@article{indigo-aai-chep2016, + author={Andrea Ceccanti and Marcus Hardt and Bas Wegh and A. 
Paul Millar + and Marco Caberletti and Enrico Vianello and Slavek Licehammer}, + title={{The INDIGO-Datacloud Authentication and Authorization Infrastructure}}, + journal={Journal of Physics: Conference Series}, + volume={898}, + number={10}, + pages={102016}, + url={http://iopscience.iop.org/article/10.1088/1742-6596/898/10/102016}, + year={2017} +} + + +@software{iam, + author = {Andrea Ceccanti and Enrico Vianello and Marco Caberletti}, + title = {{INDIGO Identity and Access Management (IAM)}}, + doi = {10.5281/zenodo.1874790}, + url = {https://doi.org/10.5281/zenodo.1874790} +} + + +@software{voms-admin, + author = {Andrea Ceccanti}, + title = {{The VOMS administration service}}, + doi = {10.5281/zenodo.1875616}, + url = {https://doi.org/10.5281/zenodo.1875616} +} + +@misc{cwp, +Author = {{HEP Software Foundation} and Johannes Albrecht and Antonio + Augusto {Alves} Jr and Guilherme Amadio and Giuseppe Andronico and Nguyen + Anh-Ky and Laurent Aphecetche and John Apostolakis and Makoto Asai and Luca + Atzori and Marian Babik and Giuseppe Bagliesi and Marilena Bandieramonte + and Sunanda Banerjee and Martin Barisits and Lothar A. T. 
Bauerdick and + Stefano Belforte and Douglas Benjamin and Catrin Bernius and Wahid Bhimji + and Riccardo Maria Bianchi and Ian Bird and Catherine Biscarat and Jakob + Blomer and Kenneth Bloom and Tommaso Boccali and Brian Bockelman and Tomasz + Bold and Daniele Bonacorsi and Antonio Boveia and Concezio Bozzi and Marko + Bracko and David Britton and Andy Buckley and Predrag Buncic and Paolo + Calafiura and Simone Campana and Philippe Canal and Luca Canali and + Gianpaolo Carlino and Nuno Castro and Marco Cattaneo and Gianluca Cerminara + and Javier Cervantes Villanueva and Philip Chang and John Chapman and Gang + Chen and Taylor Childers and Peter Clarke and Marco Clemencic and Eric + Cogneras and Jeremy Coles and Ian Collier and David Colling and Gloria + Corti and Gabriele Cosmo and Davide Costanzo and Ben Couturier and Kyle + Cranmer and Jack Cranshaw and Leonardo Cristella and David Crooks and + Sabine Crépé-Renaudin and Robert Currie and Sünje Dallmeier-Tiessen and + Kaushik De and Michel De Cian and Albert De Roeck and Antonio Delgado Peris + and Frédéric Derue and Alessandro Di Girolamo and Salvatore Di Guida and + Gancho Dimitrov and Caterina Doglioni and Andrea Dotti and Dirk Duellmann + and Laurent Duflot and Dave Dykstra and Katarzyna Dziedziniewicz-Wojcik and + Agnieszka Dziurda and Ulrik Egede and Peter Elmer and Johannes Elmsheuser + and V. Daniel Elvira and Giulio Eulisse and Steven Farrell and Torben + Ferber and Andrej Filipcic and Ian Fisk and Conor Fitzpatrick and José Flix + and Andrea Formica and Alessandra Forti and Giovanni Franzoni and James + Frost and Stu Fuess and Frank Gaede and Gerardo Ganis and Robert Gardner + and Vincent Garonne and Andreas Gellrich and Krzysztof Genser and Simon + George and Frank Geurts and Andrei Gheata and Mihaela Gheata and Francesco + Giacomini and Stefano Giagu and Manuel Giffels and Douglas Gingrich and + Maria Girone and Vladimir V. 
Gligorov and Ivan Glushkov and Wesley Gohn and + Jose Benito Gonzalez Lopez and Isidro González Caballero and Juan R. + González Fernández and Giacomo Govi and Claudio Grandi and Hadrien Grasland + and Heather Gray and Lucia Grillo and Wen Guan and Oliver Gutsche and + Vardan Gyurjyan and Andrew Hanushevsky and Farah Hariri and Thomas Hartmann + and John Harvey and Thomas Hauth and Benedikt Hegner and Beate Heinemann + and Lukas Heinrich and Andreas Heiss and José M. Hernández and Michael + Hildreth and Mark Hodgkinson and Stefan Hoeche and Burt Holzman and Peter + Hristov and Xingtao Huang and Vladimir N. Ivanchenko and Todor Ivanov and + Jan Iven and Brij Jashal and Bodhitha Jayatilaka and Roger Jones and Michel + Jouvin and Soon Yung Jun and Michael Kagan and Charles William Kalderon and + Meghan Kane and Edward Karavakis and Daniel S. Katz and Dorian Kcira and + Oliver Keeble and Borut Paul Kersevan and Michael Kirby and Alexei + Klimentov and Markus Klute and Ilya Komarov and Dmitri Konstantinov and + Patrick Koppenburg and Jim Kowalkowski and Luke Kreczko and Thomas Kuhr and + Robert Kutschke and Valentin Kuznetsov and Walter Lampl and Eric Lancon and + David Lange and Mario Lassnig and Paul Laycock and Charles Leggett and + James Letts and Birgit Lewendel and Teng Li and Guilherme Lima and Jacob + Linacre and Tomas Linden and Miron Livny and Giuseppe Lo Presti and + Sebastian Lopienski and Peter Love and Adam Lyon and Nicolò Magini and + Zachary L. Marshall and Edoardo Martelli and Stewart Martin-Haugh and Pere + Mato and Kajari Mazumdar and Thomas McCauley and Josh McFayden and Shawn + McKee and Andrew McNab and Rashid Mehdiyev and Helge Meinhard and Dario + Menasce and Patricia Mendez Lorenzo and Alaettin Serhan Mete and Michele + Michelotto and Jovan Mitrevski and Lorenzo Moneta and Ben Morgan and + Richard Mount and Edward Moyse and Sean Murray and Armin Nairz and Mark S. 
+ Neubauer and Andrew Norman and Sérgio Novaes and Mihaly Novak and Arantza + Oyanguren and Nurcan Ozturk and Andres Pacheco Pages and Michela Paganini + and Jerome Pansanel and Vincent R. Pascuzzi and Glenn Patrick and Alex + Pearce and Ben Pearson and Kevin Pedro and Gabriel Perdue and Antonio + Perez-Calero Yzquierdo and Luca Perrozzi and Troels Petersen and Marko + Petric and Andreas Petzold and Jónatan Piedra and Leo Piilonen and Danilo + Piparo and Jim Pivarski and Witold Pokorski and Francesco Polci and Karolos + Potamianos and Fernanda Psihas and Albert Puig Navarro and Günter Quast and + Gerhard Raven and Jürgen Reuter and Alberto Ribon and Lorenzo Rinaldi and + Martin Ritter and James Robinson and Eduardo Rodrigues and Stefan Roiser + and David Rousseau and Gareth Roy and Grigori Rybkine and Andre Sailer and + Tai Sakuma and Renato Santana and Andrea Sartirana and Heidi Schellman and + Jaroslava Schovancová and Steven Schramm and Markus Schulz and Andrea + Sciabà and Sally Seidel and Sezen Sekmen and Cedric Serfon and Horst + Severini and Elizabeth Sexton-Kennedy and Michael Seymour and Davide + Sgalaberna and Illya Shapoval and Jamie Shiers and Jing-Ge Shiu and Hannah + Short and Gian Piero Siroli and Sam Skipsey and Tim Smith and Scott Snyder + and Michael D. Sokoloff and Panagiotis Spentzouris and Hartmut Stadie and + Giordon Stark and Gordon Stewart and Graeme A. Stewart and Arturo Sánchez + and Alberto Sánchez-Hernández and Anyes Taffard and Umberto Tamponi and + Jeff Templon and Giacomo Tenaglia and Vakhtang Tsulaia and Christopher + Tunnell and Eric Vaandering and Andrea Valassi and Sofia Vallecorsa and + Liviu Valsan and Peter Van Gemmeren and Renaud Vernet and Brett Viren and + Jean-Roch Vlimant and Christian Voss and Margaret Votava and Carl Vuosalo + and Carlos Vázquez Sierra and Romain Wartel and Gordon T. 
Watts and Torre + Wenaus and Sandro Wenzel and Mike Williams and Frank Winklmeier and + Christoph Wissing and Frank Wuerthwein and Benjamin Wynne and Zhang Xiaomei + and Wei Yang and Efe Yazgan}, Title = {{A Roadmap for HEP Software and + Computing R\&D for the 2020s}}, Year = {2017}, Eprint = {arXiv:1712.06982}, +} + +@online{scitokens, + title = {{The SciTokens project}}, + url = {https://scitokens.org}, + urldate = {2018-12-03} +} + +@online{kubernetes, + title = {{The Kubernetes container orchestrator}}, + url = {https://kubernetes.io}, + urldate = {2018-12-03} +} + +@online{openstack, + title = {{The Openstack IAAS framework}}, + url = {https://www.openstack.org}, + urldate = {2018-12-03} +} + +@online{fts, + title = {{The CERN File Transfer Service}}, + url = {https://fts.web.cern.ch}, + urldate = {2018-12-03} +} + +@online{storm, + title = {{The StoRM storage element}}, + url = {https://italiangrid.github.io/storm}, + urldate = {2019-05-15} +} + +@online{dcache, + title = {{The XRootD software framework}}, + url = {https://dcache.org}, + urldate = {2019-05-15} +} + +@online{xrootd, + title = {{The dCache storage solution}}, + url = {http://xrootd.org/}, + urldate = {2019-05-15} +} + +@online{dpm, + title = {{DPM - Disk Pool Manager}}, + url = {http://lcgdm.web.cern.ch/dpm}, + urldate = {2019-05-15} +} + +@online{eos, + title = {{EOS Open Storage}}, + url = {http://eos.web.cern.ch/}, + urldate = {2019-05-15} +} + +@online{oidc-rande, + title = {{The OpenID Research \& Education working group}}, + url = {https://openid.net/wg/rande}, + urldate = {2018-12-03} +} + +@techreport{voms-ac-format, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The VOMS Attribute Certificate format }}, + year = {2011}, + month = {August}, + publisher = {Open Grid Forum}, + institution = {Open Grid Forum}, + url = {https://www.ogf.org/documents/GFD.182.pdf} +} + +@online{aarc-blueprint, + title = {{The AARC Blueprint Architecture}}, + url = 
{https://aarc-project.eu/architecture}, + urldate = {2018-12-03} +} + +@online{rcauth-ssh, + title = {{RCAuth.eu: getting proxies using SSH key AuthN}}, + author = {Mischa Sall\'e}, + url = {https://indico.cern.ch/event/669715/contributions/2739035/attachments/1532101/2398499/RCauth_SSH_wlcg_authz_wg.pdf}, + urldate = {2018-12-03} +} + +@online{oauth4myproxy, + title = {{OAuth for MyProxy}}, + url = {http://grid.ncsa.illinois.edu/myproxy/oauth/}, + urldate = {2019-03-18} +} + +@online{rcauth, + title = {{The RCAuth online CA}}, + url = {https://rcauth.eu}, + urldate = {2018-12-03} +} + +@online{dodas, + title = {{Dynamic On Demand Analysis Service: DODAS}}, + url = {https://dodas-ts.github.io/dodas-doc}, + urldate = {2018-12-03} +} + +@online{eosc-hub, + title = {{The EOSC-Hub project}}, + url = {https://www.eosc-hub.eu}, + urldate = {2018-12-03} +} + +@online{aarc, + title = {{The AARC project}}, + url = {https://aarc-project.eu}, + urldate = {2018-12-03} +} + +@online{fim4r, + title = {{Federated Identity Management for Research}}, + url = {https://fim4r.org}, + urldate = {2018-12-03} +} + +@online{wlcg-authz-wg, + title = {{The WLCG Authorization Working Group}}, + url = {https://twiki.cern.ch/twiki/bin/view/LCG/WLCGAuthorizationWG}, + urldate = {2018-12-03} +} + +@online{nikhef, + title = {{The Dutch National Insititute for Sub-atomic Physics}}, + url = {https://www.nikhef.nl}, + urldate = {2019-5-10} +} + +@misc{indigo-datacloud, + Author = {INDIGO-DataCloud Collaboration and : and Davide Salomoni and Isabel + Campos and Luciano Gaido and Jesus Marco de Lucas and Peter Solagna and Jorge + Gomes and Ludek Matyska and Patrick Fuhrman and Marcus Hardt and Giacinto + Donvito and Lukasz Dutka and Marcin Plociennik and Roberto Barbera and + Ignacio Blanquer and Andrea Ceccanti and Mario David and Cristina Duma and + Alvaro López-GarcÃa and Germán Moltó and Pablo Orviz and Zdenek Sustr and + Matthew Viljoen and Fernando Aguilar and Luis Alves and Marica Antonacci + 
and Lucio Angelo Antonelli and Stefano Bagnasco and Alexandre M. J. J. + Bonvin and Riccardo Bruno and Eva Cetinic and Yin Chen and Alessandro Costa + and Davor Davidovic and Benjamin Ertl and Marco Fargetta and Sandro Fiore + and Stefano Gallozzi and Zeynep Kurkcuoglu and Lara Lloret and Joao Martins + and Alessandra Nuzzo and Paola Nassisi and Cosimo Palazzo and Joao Pina and + Eva Sciacca and Daniele Spiga and Marco Antonio Tangaro and Michal Urbaniak + and Sara Vallero and Bas Wegh and Valentina Zaccolo and Federico Zambelli + and Tomasz Zok}, + Title = {{INDIGO-DataCloud:A data and computing platform to facilitate seamless + access to e-infrastructures}}, + Year = {2017}, + Eprint = {arXiv:1711.01981}, +} + +@online{kubernetes-labels, + title = {{Kubernetes labels and selectors}}, + url = {https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/}, + urldate = {2018-12-03} +} + +@online{spid, + title = {{Sistema Pubblico di Identità Digitale}}, + url = {https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/}, + urldate = {2018-12-03} +} + +@online{hr-db-api-service, + title = {{CERN HR DB API service }}, + url = {https://baltig.infn.it/aceccant/cern-hr-db-service}, + urldate = {2018-12-03} +} + +@online{cern-openshift, + title = {{CERN Openshift PAAS infrastructure}}, + url = {http://information-technology.web.cern.ch/services/PaaS-Web-App}, + urldate = {2018-12-03} +} + +@online{srm-2.2, + title = {{The Storage Resource Manager v. 
2.2 specification}}, + url = {https://sdm.lbl.gov/srm-wg/doc/SRM.v2.2.070402.html}, + urldate = {2018-12-03} +} + +@online{keycloak, + title = {{The Keycloak Identity and Access Management system}}, + url = {https://www.keycloak.org/}, + urldate = {2018-12-03} +} + +@inproceedings{cern-sso, + doi = {10.1088/1742-6596/119/8/082008}, + url = {https://doi.org/10.1088%2F1742-6596%2F119%2F8%2F082008}, + year = 2008, + volume = {119}, + number = {8}, + pages = {082008}, + author = {E Ormancey}, + title = {{CERN} single sign on solution}, + booktitle = {Journal of Physics: Conference Series} +} + +@inproceedings{voms-convergence, + author={Andrea Ceccanti and Vincenzo Ciaschini and Maria Dimou and Gabriele Garzoglio and Tanya Levshina and Steve Traylen and Valerio Venturi}, + title={{VOMS/VOMRS utilization patterns and convergence plan}}, + booktitle={Journal of Physics: Conference Series}, + volume={219}, + number={6}, + pages={062006}, + url={http://stacks.iop.org/1742-6596/219/i=6/a=062006}, + year={2010} +} + +@inproceedings{gpfs, + author = {Schmuck, Frank and Haskin, Roger}, + title = {GPFS: A Shared-disk File System for Large Computing Clusters}, + booktitle = {Proceedings of the 1st USENIX Conference on File and Storage Technologies}, + series = {FAST'02}, + year = {2002}, + location = {Monterey, CA}, + pages = {16--16}, + numpages = {1}, + url = {http://dl.acm.org/citation.cfm?id=1973333.1973349}, + acmid = {1973349}, + publisher = {USENIX Association}, + address = {Berkeley, CA, USA}, +} + +@inproceedings{gemss, + doi = {10.1088/1742-6596/608/1/012013}, + url = {https://doi.org/10.1088%2F1742-6596%2F608%2F1%2F012013}, + year = 2015, + month = {may}, + publisher = {{IOP} Publishing}, + volume = {608}, + pages = {012013}, + author = {Pier Paolo Ricci and Alessandro Cavalli and Luca Dell'Agnello and Matteo Favaro and Daniele Gregori and Andrea Prosperini and Michele Pezzi and Vladimir Sapunenko and Giovanni Zizzi and Vincenzo Vagnoni}, + title = {The {INFN}-{CNAF} 
Tier-1 {GEMSS} Mass Storage System and database facility activity}, + booktitle = {Journal of Physics: Conference Series} +} + +@online{storm-1.11.13, + title = {{StoRM 1.11.13 release notes}}, + url = {http://italiangrid.github.io/storm/release-notes/StoRM-v1.11.13.html}, + urldate = {2018-12-03} +} + +@online{storm-1.11.14, + title = {{StoRM 1.11.14 release notes}}, + url = {http://italiangrid.github.io/storm/release-notes/StoRM-v1.11.14.html}, + urldate = {2018-12-03} +} + +@online{storage-resource-reporting-proposal, + title = {{Storage Resource Reporting proposal for storage providers}}, + url = {https://docs.google.com/document/d/1yzCvKpxsbcQC5K9MyvXc-vBF1HGPBk4vhjw3MEXoXf8}, + urldate = {2018-11-23} +} + +@online{globus-end-of-support, + title = {{Globus toolkit end-of-support announcement}}, + url = {https://www.globus.org/blog/support-open-source-globus-toolkit-ends-january-2018}, + urldate = {2018-12-03} +} + +@online{doma-tpc, + title = {{The WLCG DOMA Third Party Copy (TPC) working group}}, + url = {https://twiki.cern.ch/twiki/bin/view/LCG/ThirdPartyCopy}, + urldate = {2018-12-03} +} + +@online{spring-boot, + title = {{The Spring Boot project}}, + url = {https://spring.io/projects/spring-boot}, + urldate = {2018-12-03} +} + +@online{yaim, + title = {{Yet Another Installation Manager}}, + url = {https://twiki.cern.ch/twiki/bin/view/EGEE/YAIM}, + urldate = {2018-12-03} +} + +@online{puppet, + title = {{The puppet configuration management tool}}, + url = {https://puppet.com/}, + urldate = {2018-12-03} +} \ No newline at end of file diff --git a/contributions/sd_storm/high-level-arch.png b/contributions/sd_storm/high-level-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..4e6a14b077841497bd8cd62943e1d3f54a6ed6a9 Binary files /dev/null and b/contributions/sd_storm/high-level-arch.png differ diff --git a/contributions/sd_storm/iopams.sty b/contributions/sd_storm/iopams.sty new file mode 100644 index
0000000000000000000000000000000000000000..044dde929745d48d13601b572a0f586728ebf0a4 --- /dev/null +++ b/contributions/sd_storm/iopams.sty @@ -0,0 +1,87 @@ +%% +%% This is file `iopams.sty' +%% File to include AMS fonts and extra definitions for bold greek +%% characters for use with iopart.cls +%% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{iopams}[1997/02/13 v1.0] +\RequirePackage{amsgen}[1995/01/01] +\RequirePackage{amsfonts}[1995/01/01] +\RequirePackage{amssymb}[1995/01/01] +\RequirePackage{amsbsy}[1995/01/01] +% +\iopamstrue % \newif\ifiopams in iopart.cls & iopbk2e.cls +% % allows optional text to be in author guidelines +% +% Bold lower case Greek letters +% +\newcommand{\balpha}{\boldsymbol{\alpha}} +\newcommand{\bbeta}{\boldsymbol{\beta}} +\newcommand{\bgamma}{\boldsymbol{\gamma}} +\newcommand{\bdelta}{\boldsymbol{\delta}} +\newcommand{\bepsilon}{\boldsymbol{\epsilon}} +\newcommand{\bzeta}{\boldsymbol{\zeta}} +\newcommand{\bfeta}{\boldsymbol{\eta}} +\newcommand{\btheta}{\boldsymbol{\theta}} +\newcommand{\biota}{\boldsymbol{\iota}} +\newcommand{\bkappa}{\boldsymbol{\kappa}} +\newcommand{\blambda}{\boldsymbol{\lambda}} +\newcommand{\bmu}{\boldsymbol{\mu}} +\newcommand{\bnu}{\boldsymbol{\nu}} +\newcommand{\bxi}{\boldsymbol{\xi}} +\newcommand{\bpi}{\boldsymbol{\pi}} +\newcommand{\brho}{\boldsymbol{\rho}} +\newcommand{\bsigma}{\boldsymbol{\sigma}} +\newcommand{\btau}{\boldsymbol{\tau}} +\newcommand{\bupsilon}{\boldsymbol{\upsilon}} +\newcommand{\bphi}{\boldsymbol{\phi}} +\newcommand{\bchi}{\boldsymbol{\chi}} +\newcommand{\bpsi}{\boldsymbol{\psi}} +\newcommand{\bomega}{\boldsymbol{\omega}} +\newcommand{\bvarepsilon}{\boldsymbol{\varepsilon}} +\newcommand{\bvartheta}{\boldsymbol{\vartheta}} +\newcommand{\bvaromega}{\boldsymbol{\varomega}} +\newcommand{\bvarrho}{\boldsymbol{\varrho}} +\newcommand{\bvarzeta}{\boldsymbol{\varsigma}} %NB really sigma +\newcommand{\bvarsigma}{\boldsymbol{\varsigma}} +\newcommand{\bvarphi}{\boldsymbol{\varphi}} +% +% Bold upright capital 
Greek letters +% +\newcommand{\bGamma}{\boldsymbol{\Gamma}} +\newcommand{\bDelta}{\boldsymbol{\Delta}} +\newcommand{\bTheta}{\boldsymbol{\Theta}} +\newcommand{\bLambda}{\boldsymbol{\Lambda}} +\newcommand{\bXi}{\boldsymbol{\Xi}} +\newcommand{\bPi}{\boldsymbol{\Pi}} +\newcommand{\bSigma}{\boldsymbol{\Sigma}} +\newcommand{\bUpsilon}{\boldsymbol{\Upsilon}} +\newcommand{\bPhi}{\boldsymbol{\Phi}} +\newcommand{\bPsi}{\boldsymbol{\Psi}} +\newcommand{\bOmega}{\boldsymbol{\Omega}} +% +% Bold versions of miscellaneous symbols +% +\newcommand{\bpartial}{\boldsymbol{\partial}} +\newcommand{\bell}{\boldsymbol{\ell}} +\newcommand{\bimath}{\boldsymbol{\imath}} +\newcommand{\bjmath}{\boldsymbol{\jmath}} +\newcommand{\binfty}{\boldsymbol{\infty}} +\newcommand{\bnabla}{\boldsymbol{\nabla}} +\newcommand{\bdot}{\boldsymbol{\cdot}} +% +% Symbols for caption +% +\renewcommand{\opensquare}{\mbox{$\square$}} +\renewcommand{\opentriangle}{\mbox{$\vartriangle$}} +\renewcommand{\opentriangledown}{\mbox{$\triangledown$}} +\renewcommand{\opendiamond}{\mbox{$\lozenge$}} +\renewcommand{\fullsquare}{\mbox{$\blacksquare$}} +\newcommand{\fulldiamond}{\mbox{$\blacklozenge$}} +\newcommand{\fullstar}{\mbox{$\bigstar$}} +\newcommand{\fulltriangle}{\mbox{$\blacktriangle$}} +\newcommand{\fulltriangledown}{\mbox{$\blacktriangledown$}} + +\endinput +%% +%% End of file `iopams.sty'. 
diff --git a/contributions/sd_storm/jpconf.cls b/contributions/sd_storm/jpconf.cls new file mode 100644 index 0000000000000000000000000000000000000000..09f509fdcfde0543cfbc37e4f64c02e11d9b4972 --- /dev/null +++ b/contributions/sd_storm/jpconf.cls @@ -0,0 +1,957 @@ +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{jpconf} + [2007/03/07 v1.1 + LaTeX class for Journal of Physics: Conference Series] +%\RequirePackage{graphicx} +\newcommand\@ptsize{1} +\newif\if@restonecol +\newif\if@letterpaper +\newif\if@titlepage +\newif\ifiopams +\@titlepagefalse +\@letterpaperfalse +\DeclareOption{a4paper} + {\setlength\paperheight {297mm}% + \setlength\paperwidth {210mm}% +\@letterpaperfalse} +\DeclareOption{letterpaper} + {\setlength\paperheight {279.4mm}% + \setlength\paperwidth {215.9mm}% +\@letterpapertrue} +\DeclareOption{landscape} + {\setlength\@tempdima {\paperheight}% + \setlength\paperheight {\paperwidth}% + \setlength\paperwidth {\@tempdima}} +\DeclareOption{twoside}{\@twosidetrue \@mparswitchtrue} +\renewcommand\@ptsize{1} +%\ExecuteOptions{A4paper, twoside} +\ExecuteOptions{A4paper} +\ProcessOptions +\DeclareMathAlphabet{\bi}{OML}{cmm}{b}{it} +\DeclareMathAlphabet{\bcal}{OMS}{cmsy}{b}{n} +\input{jpconf1\@ptsize.clo} +\setlength\lineskip{1\p@} +\setlength\normallineskip{1\p@} +\renewcommand\baselinestretch{} +\setlength\parskip{0\p@ \@plus \p@} +\@lowpenalty 51 +\@medpenalty 151 +\@highpenalty 301 +\setlength\parindent{5mm} +\setcounter{topnumber}{8} +\renewcommand\topfraction{1} +\setcounter{bottomnumber}{3} +\renewcommand\bottomfraction{.99} +\setcounter{totalnumber}{8} +\renewcommand\textfraction{0.01} +\renewcommand\floatpagefraction{.8} +\setcounter{dbltopnumber}{6} +\renewcommand\dbltopfraction{1} +\renewcommand\dblfloatpagefraction{.8} +\renewcommand{\title}{\@ifnextchar[{\@stitle}{\@ftitle}} +\pretolerance=5000 +\tolerance=8000 +% Headings for all pages apart from first +% +\def\ps@headings{% + \let\@oddfoot\@empty + \let\@evenfoot\@empty + 
\let\@oddhead\@empty + \let\@evenhead\@empty + %\def\@evenhead{\thepage\hfil\itshape\rightmark}% + %\def\@oddhead{{\itshape\leftmark}\hfil\thepage}% + %\def\@evenhead{{\itshape Journal of Physics: Conference Series}\hfill}% + %\def\@oddhead{\hfill {\itshape Journal of Physics: Conference Series}}%% + \let\@mkboth\markboth + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +% Headings for first page +% +\def\ps@myheadings{\let\@oddfoot\@empty\let\@evenfoot\@empty + \let\@oddhead\@empty\let\@evenhead\@empty + \let\@mkboth\@gobbletwo + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +\def\@stitle[#1]#2{\markboth{#1}{#1}% + %\pagestyle{empty}% + \thispagestyle{myheadings} + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large +\fontsize{18bp}{24bp}\selectfont\bf\raggedright\noindent#2\par}} +\def\@ftitle#1{\markboth{#1}{#1}% + \thispagestyle{myheadings} +%\pagestyle{empty}% + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large\raggedright\noindent\bf#1\par} +\fontsize{18bp}{24bp}\selectfont\bf\noindent\raggedright#1\par}} +%AUTHOR +\renewcommand{\author}{\@ifnextchar[{\@sauthor}{\@fauthor}} +\def\@sauthor[#1]#2{\markright{#1} % for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#2 + \end{indented}% + \smallskip} +\def\@fauthor#1{%\markright{#1} for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#1 + \end{indented}% + \smallskip} +%E-MAIL +\def\eads#1{\vspace*{5pt}\address{E-mail: #1}} +\def\ead#1{\vspace*{5pt}\address{E-mail: \mailto{#1}}} +\def\mailto#1{{\tt #1}} +%ADDRESS +\newcommand{\address}[1]{\begin{indented} + \item[]\rm\raggedright #1 + \end{indented}} +\newlength{\indentedwidth} +\newdimen\mathindent +\mathindent = 6pc +\indentedwidth=\mathindent +% FOOTNOTES +%\renewcommand\footnoterule{% +% \kern-3\p@ +% \hrule\@width.4\columnwidth +% \kern2.6\p@} +%\newcommand\@makefntext[1]{% +% \parindent 1em% +% \noindent +% 
\hb@xt@1.8em{\hss\@makefnmark}#1} +% Footnotes: symbols selected in same order as address indicators +% unless optional argument of [<num>] use to specify required symbol, +% 1=\dag, 2=\ddag, etc +% Usage: \footnote{Text of footnote} +% \footnote[3]{Text of footnote} +% +\def\footnoterule{}% +\setcounter{footnote}{0} +\long\def\@makefntext#1{\parindent 1em\noindent + \makebox[1em][l]{\footnotesize\rm$\m@th{\fnsymbol{footnote}}$}% + \footnotesize\rm #1} +\def\@makefnmark{\normalfnmark} +\def\normalfnmark{\hbox{${\fnsymbol{footnote}}\m@th$}} +\def\altfnmark{\hbox{$^{\rm Note}\ {\fnsymbol{footnote}}\m@th$}} +\def\footNote#1{\let\@makefnmark\altfnmark\footnote{#1}\let\@makefnmark\normalfnmark} +\def\@thefnmark{\fnsymbol{footnote}} +\def\footnote{\protect\pfootnote} +\def\pfootnote{\@ifnextchar[{\@xfootnote}{\stepcounter{\@mpfn}% + \begingroup\let\protect\noexpand + \xdef\@thefnmark{\thempfn}\endgroup + \@footnotemark\@footnotetext}} +\def\@xfootnote[#1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} + +\newcommand\ftnote{\protect\pftnote} +\newcommand\pftnote[1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} +\newcommand{\fnm}[1]{\setcounter{footnote}{#1}\footnotetext} + +\def\@fnsymbol#1{\ifnum\thefootnote=99\hbox{*}\else^{\thefootnote}\fi\relax} +% +% Address marker +% +\newcommand{\ad}[1]{\noindent\hbox{$^{#1}$}\relax} +\newcommand{\adnote}[2]{\noindent\hbox{$^{#1,}$}\setcounter{footnote}{#2}% + \addtocounter{footnote}{-1}\footnote} +\def\@tnote{} +\newcounter{oldftnote} +\newcommand{\tnote}[1]{*\gdef\@tnote{% + \setcounter{oldftnote}{\c@footnote}% + \setcounter{footnote}{99}% + \footnotetext{#1}% + \setcounter{footnote}{\c@oldftnote}\addtocounter{footnote}{-1}}} +%================== +% Acknowledgments (no heading if letter) +% Usage \ack for Acknowledgments, \ackn for Acknowledgement +\def\ack{\section*{Acknowledgments}} +\def\ackn{\section*{Acknowledgment}} +%SECTION DEFINITIONS +\setcounter{secnumdepth}{3} +\newcounter 
{section} +\newcounter {subsection}[section] +\newcounter {subsubsection}[subsection] +\newcounter {paragraph}[subsubsection] +\newcounter {subparagraph}[paragraph] +\renewcommand \thesection {\arabic{section}} +\renewcommand\thesubsection {\thesection.\arabic{subsection}} +\renewcommand\thesubsubsection{\thesubsection .\arabic{subsubsection}} +\renewcommand\theparagraph {\thesubsubsection.\arabic{paragraph}} +\renewcommand\thesubparagraph {\theparagraph.\arabic{subparagraph}} +%\nosections +\def\nosections{\vspace{30\p@ plus12\p@ minus12\p@} + \noindent\ignorespaces} + +%\renewcommand{\@startsection}[6] +%{% +%\if@noskipsec \leavevmode \fi +%\par +% \@tempskipa #4\relax +%%\@tempskipa 0pt\relax +% \@afterindenttrue +% \ifdim \@tempskipa <\z@ +% \@tempskipa -\@tempskipa \@afterindentfalse +% \fi +% \if@nobreak +% \everypar{}% +% \else +% \addpenalty\@secpenalty\addvspace\@tempskipa +% \fi +% \@ifstar +% {\@ssect{#3}{#4}{#5}{#6}}% +% {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} +%\renewcommand{\@sect}[8]{% +% \ifnum #2>\c@secnumdepth +% \let\@svsec\@empty +% \else +% \refstepcounter{#1}% +% \protected@edef\@svsec{\@seccntformat{#1}\relax}% +% \fi +% \@tempskipa #5\relax +% \ifdim \@tempskipa>\z@ +% \begingroup +% #6{% +% \@hangfrom{\hskip #3\relax\@svsec}% +% \interlinepenalty \@M #8\@@par}% +% \endgroup +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}% +% \else +% \def\@svsechd{% +% #6{\hskip #3\relax +% \@svsec #8}% +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}}% +% \fi +% \@xsect{#5}} +%\renewcommand{\@xsect}[1]{% +% \@tempskipa #1\relax +% \ifdim \@tempskipa>\z@ +% \par \nobreak +% \vskip \@tempskipa +% \@afterheading +% \else +% \@nobreakfalse +% \global\@noskipsectrue +% \everypar{% +% \if@noskipsec +% \global\@noskipsecfalse +% 
{\setbox\z@\lastbox}% +% \clubpenalty\@M +% \begingroup \@svsechd \endgroup +% \unskip +% \@tempskipa #1\relax +% \hskip -\@tempskipa +% \else +% \clubpenalty \@clubpenalty +% \everypar{}% +% \fi}% +% \fi +% \ignorespaces} +%======================================================================== +\newcommand\section{\@startsection {section}{1}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\bfseries\raggedright}} +\newcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\itshape\raggedright}} +\newcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {-1em \@plus .2em}% + {\reset@font\normalsize\itshape}} +\newcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {3.25ex \@plus1ex \@minus.2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\newcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}% + {3.25ex \@plus1ex \@minus .2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\def\@sect#1#2#3#4#5#6[#7]#8{\ifnum #2>\c@secnumdepth + \let\@svsec\@empty\else + \refstepcounter{#1}\edef\@svsec{\csname the#1\endcsname. 
}\fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6\relax + \noindent{\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\par}% + \endgroup + \csname #1mark\endcsname{#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}\else + \def\@svsechd{#6\hskip #3\relax %% \relax added 2 May 90 + \@svsec #8\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}}\fi + \@xsect{#5}} +% +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + \begingroup #4\noindent{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + \else \def\@svsechd{#4\hskip #1\relax #5}\fi + \@xsect{#3}} +% LIST DEFINITIONS +\setlength\leftmargini {2em} +\leftmargin \leftmargini +\setlength\leftmarginii {2em} +\setlength\leftmarginiii {1.8em} +\setlength\leftmarginiv {1.6em} + \setlength\leftmarginv {1em} + \setlength\leftmarginvi {1em} +\setlength\leftmargin{\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\roman{enumi}} +\renewcommand\theenumii{\alph{enumii}} +\renewcommand\theenumiii{\arabic{enumiii}} +\renewcommand\theenumiv{\Alph{enumiv}} +\newcommand\labelenumi{(\theenumi)} +\newcommand\labelenumii{(\theenumii)} +\newcommand\labelenumiii{\theenumiii.} +\newcommand\labelenumiv{(\theenumiv)} +\renewcommand\p@enumii{(\theenumi)} +\renewcommand\p@enumiii{(\theenumi.\theenumii)} +\renewcommand\p@enumiv{(\theenumi.\theenumii.\theenumiii)} +\newcommand\labelitemi{$\m@th\bullet$} +\newcommand\labelitemii{\normalfont\bfseries --} +\newcommand\labelitemiii{$\m@th\ast$} +\newcommand\labelitemiv{$\m@th\cdot$} +\renewcommand \theequation {\@arabic\c@equation} + +%%%%%%%%%%%%% Figures +\newcounter{figure} +\renewcommand\thefigure{\@arabic\c@figure} 
+\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +\def\fnum@figure{\figurename~\thefigure} +\newenvironment{figure}{\footnotesize\rm\@float{figure}}% + {\end@float\normalsize\rm} +\newenvironment{figure*}{\footnotesize\rm\@dblfloat{figure}}{\end@dblfloat} +\newcounter{table} +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\tablename~\thetable} +\newenvironment{table}{\footnotesize\rm\@float{table}}% + {\end@float\normalsize\rm} +\newenvironment{table*}{\footnotesize\rm\@dblfloat{table}}% + {\end@dblfloat\normalsize\rm} +\newlength\abovecaptionskip +\newlength\belowcaptionskip +\setlength\abovecaptionskip{10\p@} +\setlength\belowcaptionskip{0\p@} +%Table Environments +%\newenvironment{tableref}[3][\textwidth]{% +%\begin{center}% +%\begin{table}% +%\captionsetup[table]{width=#1} +%\centering\caption{\label{#2}#3}}{\end{table}\end{center}} +%%%%%%%%%%%%%%%%% +%\newcounter{figure} +%\renewcommand \thefigure {\@arabic\c@figure} +%\def\fps@figure{tbp} +%\def\ftype@figure{1} +%\def\ext@figure{lof} +%\def\fnum@figure{\figurename~\thefigure} +%ENVIRONMENT: figure +%\newenvironment{figure} +% {\@float{figure}} +% {\end@float} +%ENVIRONMENT: figure* +%\newenvironment{figure*} +% {\@dblfloat{figure}} +% {\end@dblfloat} +%ENVIRONMENT: table +%\newcounter{table} +%\renewcommand\thetable{\@arabic\c@table} +%\def\fps@table{tbp} +%\def\ftype@table{2} +%\def\ext@table{lot} +%\def\fnum@table{\tablename~\thetable} +%\newenvironment{table} +% {\@float{table}} +% {\end@float} +%ENVIRONMENT: table* +%\newenvironment{table*} +% {\@dblfloat{table}} +% {\end@dblfloat} +%\newlength\abovecaptionskip +%\newlength\belowcaptionskip +%\setlength\abovecaptionskip{10\p@} +%\setlength\belowcaptionskip{0\p@} +% CAPTIONS +% Added redefinition of \@caption so captions are not written to +% aux file therefore less need to \protect fragile commands +% +\long\def\@caption#1[#2]#3{\par\begingroup + \@parboxrestore + 
\normalsize + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} +\long\def\@makecaption#1#2{% + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bf #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + {\bf #1.} #2\par + \else + \global \@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} +%\newcommand\@pnumwidth{1.55em} +%\newcommand\@tocrmarg{2.55em} +%\newcommand\@dotsep{4.5} +%\setcounter{tocdepth}{3} +%\newcommand\tableofcontents{% +% \section*{\contentsname +% \@mkboth{% +% \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% +% \@starttoc{toc}% +% } +%\newcommand*\l@part[2]{% +% \ifnum \c@tocdepth >-2\relax +% \addpenalty\@secpenalty +% \addvspace{2.25em \@plus\p@}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% {\leavevmode +% \large \bfseries #1\hfil \hb@xt@\@pnumwidth{\hss #2}}\par +% \nobreak +% \if@compatibility +% \global\@nobreaktrue +% \everypar{\global\@nobreakfalse\everypar{}}% +% \fi +% \endgroup +% \fi} +%\newcommand*\l@section[2]{% +% \ifnum \c@tocdepth >\z@ +% \addpenalty\@secpenalty +% \addvspace{1.0em \@plus\p@}% +% \setlength\@tempdima{1.5em}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% \leavevmode \bfseries +% \advance\leftskip\@tempdima +% \hskip -\leftskip +% #1\nobreak\hfil \nobreak\hb@xt@\@pnumwidth{\hss #2}\par +% \endgroup +% \fi} 
+%\newcommand*\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} +%\newcommand*\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} +%\newcommand*\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} +%\newcommand*\l@subparagraph{\@dottedtocline{5}{10em}{5em}} +%\newcommand\listoffigures{% +% \section*{\listfigurename +% \@mkboth{\MakeUppercase\listfigurename}% +% {\MakeUppercase\listfigurename}}% +% \@starttoc{lof}% +% } +%\newcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} +%\newcommand\listoftables{% +% \section*{\listtablename +% \@mkboth{% +% \MakeUppercase\listtablename}{\MakeUppercase\listtablename}}% +% \@starttoc{lot}% +% } +%\let\l@table\l@figure +%====================================== +%ENVIRONMENTS +%====================================== +%ENVIRONMENT: indented +\newenvironment{indented}{\begin{indented}}{\end{indented}} +\newenvironment{varindent}[1]{\begin{varindent}{#1}}{\end{varindent}} +% +\def\indented{\list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\mathindent\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endindented=\endlist +\def\varindent#1{\setlength{\varind}{#1}% + \list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\varind\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endvarindent=\endlist +%ENVIRONMENT: abstract +\newenvironment{abstract}{% + \vspace{16pt plus3pt minus3pt} + \begin{indented} + \item[]{\bfseries \abstractname.}\quad\rm\ignorespaces} + {\end{indented}\vspace{10mm}} +%ENVIRONMENT: description +\newenvironment{description} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} +%ENVIRONMENT: quotation +\newenvironment{quotation} + {\list{}{\listparindent 1.5em% + \itemindent \listparindent + \rightmargin \leftmargin + \parsep \z@ \@plus\p@}% + \item[]} + {\endlist} 
+%ENVIRONMENT: quote +\newenvironment{quote} + {\list{}{\rightmargin\leftmargin}% + \item[]} + {\endlist} +%ENVIRONMENT: verse +\newenvironment{verse} + {\let\\=\@centercr + \list{}{\itemsep \z@ + \itemindent -1.5em% + \listparindent\itemindent + \rightmargin \leftmargin + \advance\leftmargin 1.5em}% + \item[]} + {\endlist} +%ENVIRONMENT: bibliography +\newdimen\bibindent +\setlength\bibindent{1.5em} +\def\thebibliography#1{\list + {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@ + \labelsep=5\p@\itemindent=-10\p@ + \settowidth\labelwidth{\footnotesize[#1]}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \advance\leftmargin -\itemindent + \usecounter{enumi}}\footnotesize + \def\newblock{\ } + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist +\def\numrefs#1{\begin{thebibliography}{#1}} +\def\endnumrefs{\end{thebibliography}} +\let\endbib=\endnumrefs +%%%%%%%%%%%%%%%%%% + + +%\newenvironment{thebibliography}[1] +% {\section*{References} +% \list{\@biblabel{\@arabic\c@enumiv}}% +% {\settowidth\labelwidth{\@biblabel{#1}}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \@openbib@code +% \usecounter{enumiv}% +% \let\p@enumiv\@empty +% \renewcommand\theenumiv{\@arabic\c@enumiv}}% +% \sloppy +% \clubpenalty4000 +% \@clubpenalty \clubpenalty +% \widowpenalty4000% +% \sfcode`\.\@m} +% {\def\@noitemerr +% {\@latex@warning{Empty `thebibliography' environment}}% +% \endlist} +%\newcommand\newblock{\hskip .11em\@plus.33em\@minus.07em} +%\let\@openbib@code\@empty +%ENVIRONMENT: theindex +\newenvironment{theindex} + {\if@twocolumn + \@restonecolfalse + \else + \@restonecoltrue + \fi + \columnseprule \z@ + \columnsep 35\p@ + \twocolumn[\section*{\indexname}]% + \@mkboth{\MakeUppercase\indexname}% + {\MakeUppercase\indexname}% + \thispagestyle{plain}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\@idxitem} + {\if@restonecol\onecolumn\else\clearpage\fi} 
+\newcommand\@idxitem{\par\hangindent 40\p@} +\newcommand\subitem{\@idxitem \hspace*{20\p@}} +\newcommand\subsubitem{\@idxitem \hspace*{30\p@}} +\newcommand\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax} +%===================== +\def\appendix{\@ifnextchar*{\@appendixstar}{\@appendix}} +\def\@appendix{\eqnobysec\@appendixstar} +\def\@appendixstar{\@@par + \ifnumbysec % Added 30/4/94 to get Table A1, + \@addtoreset{table}{section} % Table B1 etc if numbering by + \@addtoreset{figure}{section}\fi % section + \setcounter{section}{0} + \setcounter{subsection}{0} + \setcounter{subsubsection}{0} + \setcounter{equation}{0} + \setcounter{figure}{0} + \setcounter{table}{0} + \def\thesection{Appendix \Alph{section}} + \def\theequation{\ifnumbysec + \Alph{section}.\arabic{equation}\else + \Alph{section}\arabic{equation}\fi} % Comment A\arabic{equation} maybe + \def\thetable{\ifnumbysec % better? 15/4/95 + \Alph{section}\arabic{table}\else + A\arabic{table}\fi} + \def\thefigure{\ifnumbysec + \Alph{section}\arabic{figure}\else + A\arabic{figure}\fi}} +\def\noappendix{\setcounter{figure}{0} + \setcounter{table}{0} + \def\thetable{\arabic{table}} + \def\thefigure{\arabic{figure}}} +\setlength\arraycolsep{5\p@} +\setlength\tabcolsep{6\p@} +\setlength\arrayrulewidth{.4\p@} +\setlength\doublerulesep{2\p@} +\setlength\tabbingsep{\labelsep} +\skip\@mpfootins = \skip\footins +\setlength\fboxsep{3\p@} +\setlength\fboxrule{.4\p@} +\renewcommand\theequation{\arabic{equation}} +% NAME OF STRUCTURES +\newcommand\contentsname{Contents} +\newcommand\listfigurename{List of Figures} +\newcommand\listtablename{List of Tables} +\newcommand\refname{References} +\newcommand\indexname{Index} +\newcommand\figurename{Figure} +\newcommand\tablename{Table} +\newcommand\partname{Part} +\newcommand\appendixname{Appendix} +\newcommand\abstractname{Abstract} +%Miscellaneous commands +\newcommand{\BibTeX}{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em + 
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} +\newcommand{\jpcsit}{{\bfseries\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\jpcs}{{\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\iopp}{IOP Publishing} +\newcommand{\cls}{{\upshape\selectfont\texttt{jpconf.cls}}} +\newcommand{\corg}{conference organizer} +\newcommand\today{\number\day\space\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\year} + \setlength\columnsep{10\p@} +\setlength\columnseprule{0\p@} +\newcommand{\Tables}{\clearpage\section*{Tables and table captions} +\def\fps@table{hp}\noappendix} +\newcommand{\Figures}{\clearpage\section*{Figure captions} + \def\fps@figure{hp}\noappendix} +% +\newcommand{\Figure}[1]{\begin{figure} + \caption{#1} + \end{figure}} +% +\newcommand{\Table}[1]{\begin{table} + \caption{#1} + \begin{indented} + \lineup + \item[]\begin{tabular}{@{}l*{15}{l}}} +\def\endTable{\end{tabular}\end{indented}\end{table}} +\let\endtab=\endTable +% +\newcommand{\fulltable}[1]{\begin{table} + \caption{#1} + \lineup + \begin{tabular*}{\textwidth}{@{}l*{15}{@{\extracolsep{0pt plus 12pt}}l}}} +\def\endfulltable{\end{tabular*}\end{table}} +%BIBLIOGRAPHY and References +%\newcommand{\Bibliography}[1]{\section*{References}\par\numrefs{#1}} +%\newcommand{\References}{\section*{References}\par\refs} +%\def\thebibliography#1{\list +% {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ +% \partopsep=0\p@\itemsep=0\p@ +% \labelsep=5\p@\itemindent=-10\p@ +% \settowidth\labelwidth{\footnotesize[#1]}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \advance\leftmargin -\itemindent +% \usecounter{enumi}}\footnotesize +% \def\newblock{\ } +% \sloppy\clubpenalty4000\widowpenalty4000 +% \sfcode`\.=1000\relax} +%\let\endthebibliography=\endlist +%\def\numrefs#1{\begin{thebibliography}{#1}} +%\def\endnumrefs{\end{thebibliography}} 
+%\let\endbib=\endnumrefs + +\def\thereferences{\list{}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@\labelsep=0\p@\itemindent=-18\p@ +\labelwidth=0\p@\leftmargin=18\p@ +}\footnotesize\rm +\def\newblock{\ } +\sloppy\clubpenalty4000\widowpenalty4000 +\sfcode`\.=1000\relax}% +\let\endthereferences=\endlist +% MISC EQUATRION STUFF +%\def\[{\relax\ifmmode\@badmath\else +% \begin{trivlist} +% \@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +% \item[]\leavevmode +% \hbox to\linewidth\bgroup$ \displaystyle +% \hskip\mathindent\bgroup\fi} +%\def\]{\relax\ifmmode \egroup $\hfil \egroup \end{trivlist}\else \@badmath \fi} +%\def\equation{\@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +%\refstepcounter{equation}\trivlist \item[]\leavevmode +% \hbox to\linewidth\bgroup $ \displaystyle +%\hskip\mathindent} +%\def\endequation{$\hfil \displaywidth\linewidth\@eqnnum\egroup \endtrivlist} +%\@namedef{equation*}{\[} +%\@namedef{endequation*}{\]} +%\def\eqnarray{\stepcounter{equation}\let\@currentlabel=\theequation +%\global\@eqnswtrue +%\global\@eqcnt\z@\tabskip\mathindent\let\\=\@eqncr +%\abovedisplayskip\topsep\ifvmode\advance\abovedisplayskip\partopsep\fi +%\belowdisplayskip\abovedisplayskip +%\belowdisplayshortskip\abovedisplayskip +%\abovedisplayshortskip\abovedisplayskip +%$$\halign to +%\linewidth\bgroup\@eqnsel$\displaystyle\tabskip\z@ +% {##{}}$&\global\@eqcnt\@ne $\displaystyle{{}##{}}$\hfil +% &\global\@eqcnt\tw@ $\displaystyle{{}##}$\hfil +% \tabskip\@centering&\llap{##}\tabskip\z@\cr} +%\def\endeqnarray{\@@eqncr\egroup +% \global\advance\c@equation\m@ne$$\global\@ignoretrue } +%\mathindent = 6pc +%% +%\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th +% \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil +% \crcr#1\crcr}}\,} +%% +%\def\eqalignno#1{\displ@y \tabskip\z@skip +% \halign to\displaywidth{\hspace{5pc}$\@lign\displaystyle{##}$% +% \tabskip\z@skip +% 
&$\@lign\displaystyle{{}##}$\hfill\tabskip\@centering +% &\llap{$\@lign\hbox{\rm##}$}\tabskip\z@skip\crcr +% #1\crcr}} +%% +\newif\ifnumbysec +\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi} +\def\eqnobysec{\numbysectrue\@addtoreset{equation}{section}} +\newcounter{eqnval} +\def\numparts{\addtocounter{equation}{1}% + \setcounter{eqnval}{\value{equation}}% + \setcounter{equation}{0}% + \def\theequation{\ifnumbysec + \arabic{section}.\arabic{eqnval}{\it\alph{equation}}% + \else\arabic{eqnval}{\it\alph{equation}}\fi}} +\def\endnumparts{\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi}% + \setcounter{equation}{\value{eqnval}}} +% +\def\cases#1{% + \left\{\,\vcenter{\def\\{\cr}\normalbaselines\openup1\jot\m@th% + \ialign{\strut$\displaystyle{##}\hfil$&\tqs + \rm##\hfil\crcr#1\crcr}}\right.}% +\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th + \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil + \crcr#1\crcr}}\,} +% OTHER USEFUL BITS +\newcommand{\e}{\mathrm{e}} +\newcommand{\rme}{\mathrm{e}} +\newcommand{\rmi}{\mathrm{i}} +\newcommand{\rmd}{\mathrm{d}} +\renewcommand{\qquad}{\hspace*{25pt}} +\newcommand{\tdot}[1]{\stackrel{\dots}{#1}} % Added 1/9/94 +\newcommand{\tqs}{\hspace*{25pt}} +\newcommand{\fl}{\hspace*{-\mathindent}} +\newcommand{\Tr}{\mathop{\mathrm{Tr}}\nolimits} +\newcommand{\tr}{\mathop{\mathrm{tr}}\nolimits} +\newcommand{\Or}{\mathord{\mathrm{O}}} %changed from \mathop 20/1/95 +\newcommand{\lshad}{[\![} +\newcommand{\rshad}{]\!]} +\newcommand{\case}[2]{{\textstyle\frac{#1}{#2}}} +\def\pt(#1){({\it #1\/})} +\newcommand{\dsty}{\displaystyle} +\newcommand{\tsty}{\textstyle} +\newcommand{\ssty}{\scriptstyle} +\newcommand{\sssty}{\scriptscriptstyle} +\def\lo#1{\llap{${}#1{}$}} +\def\eql{\llap{${}={}$}} +\def\lsim{\llap{${}\sim{}$}} +\def\lsimeq{\llap{${}\simeq{}$}} +\def\lequiv{\llap{${}\equiv{}$}} +% +\newcommand{\eref}[1]{(\ref{#1})} 
+%\newcommand{\eqref}[1]{Equation (\ref{#1})} +%\newcommand{\Eqref}[1]{Equation (\ref{#1})} +\newcommand{\sref}[1]{section~\ref{#1}} +\newcommand{\fref}[1]{figure~\ref{#1}} +\newcommand{\tref}[1]{table~\ref{#1}} +\newcommand{\Sref}[1]{Section~\ref{#1}} +\newcommand{\Fref}[1]{Figure~\ref{#1}} +\newcommand{\Tref}[1]{Table~\ref{#1}} +\newcommand{\opencircle}{\mbox{\Large$\circ\,$}} % moved Large outside maths +\newcommand{\opensquare}{\mbox{$\rlap{$\sqcap$}\sqcup$}} +\newcommand{\opentriangle}{\mbox{$\triangle$}} +\newcommand{\opentriangledown}{\mbox{$\bigtriangledown$}} +\newcommand{\opendiamond}{\mbox{$\diamondsuit$}} +\newcommand{\fullcircle}{\mbox{{\Large$\bullet\,$}}} % moved Large outside maths +\newcommand{\fullsquare}{\,\vrule height5pt depth0pt width5pt} +\newcommand{\dotted}{\protect\mbox{${\mathinner{\cdotp\cdotp\cdotp\cdotp\cdotp\cdotp}}$}} +\newcommand{\dashed}{\protect\mbox{-\; -\; -\; -}} +\newcommand{\broken}{\protect\mbox{-- -- --}} +\newcommand{\longbroken}{\protect\mbox{--- --- ---}} +\newcommand{\chain}{\protect\mbox{--- $\cdot$ ---}} +\newcommand{\dashddot}{\protect\mbox{--- $\cdot$ $\cdot$ ---}} +\newcommand{\full}{\protect\mbox{------}} + +\def\;{\protect\psemicolon} +\def\psemicolon{\relax\ifmmode\mskip\thickmuskip\else\kern .3333em\fi} +\def\lineup{\def\0{\hbox{\phantom{0}}}% + \def\m{\hbox{$\phantom{-}$}}% + \def\-{\llap{$-$}}} +% +%%%%%%%%%%%%%%%%%%%%% +% Tables rules % +%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\boldarrayrulewidth}{1\p@} +% Width of bold rule in tabular environment. 
+ +\def\bhline{\noalign{\ifnum0=`}\fi\hrule \@height +\boldarrayrulewidth \futurelet \@tempa\@xhline} + +\def\@xhline{\ifx\@tempa\hline\vskip \doublerulesep\fi + \ifnum0=`{\fi}} + +% +% Rules for tables with extra space around +% +\newcommand{\br}{\ms\bhline\ms} +\newcommand{\mr}{\ms\hline\ms} +% +\newcommand{\centre}[2]{\multispan{#1}{\hfill #2\hfill}} +\newcommand{\crule}[1]{\multispan{#1}{\hspace*{\tabcolsep}\hrulefill + \hspace*{\tabcolsep}}} +\newcommand{\fcrule}[1]{\ifnum\thetabtype=1\multispan{#1}{\hrulefill + \hspace*{\tabcolsep}}\else\multispan{#1}{\hrulefill}\fi} +% +% Extra spaces for tables and displayed equations +% +\newcommand{\ms}{\noalign{\vspace{3\p@ plus2\p@ minus1\p@}}} +\newcommand{\bs}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}} +\newcommand{\ns}{\noalign{\vspace{-3\p@ plus-1\p@ minus-1\p@}}} +\newcommand{\es}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}\displaystyle}% +% +\newcommand{\etal}{{\it et al\/}\ } +\newcommand{\dash}{------} +\newcommand{\nonum}{\par\item[]} %\par added 1/9/93 +\newcommand{\mat}[1]{\underline{\underline{#1}}} +% +% abbreviations for IOPP journals +% +\newcommand{\CQG}{{\it Class. Quantum Grav.} } +\newcommand{\CTM}{{\it Combust. Theory Modelling\/} } +\newcommand{\DSE}{{\it Distrib. Syst. Engng\/} } +\newcommand{\EJP}{{\it Eur. J. Phys.} } +\newcommand{\HPP}{{\it High Perform. Polym.} } % added 4/5/93 +\newcommand{\IP}{{\it Inverse Problems\/} } +\newcommand{\JHM}{{\it J. Hard Mater.} } % added 4/5/93 +\newcommand{\JO}{{\it J. Opt.} } +\newcommand{\JOA}{{\it J. Opt. A: Pure Appl. Opt.} } +\newcommand{\JOB}{{\it J. Opt. B: Quantum Semiclass. Opt.} } +\newcommand{\JPA}{{\it J. Phys. A: Math. Gen.} } +\newcommand{\JPB}{{\it J. Phys. B: At. Mol. Phys.} } %1968-87 +\newcommand{\jpb}{{\it J. Phys. B: At. Mol. Opt. Phys.} } %1988 and onwards +\newcommand{\JPC}{{\it J. Phys. C: Solid State Phys.} } %1968--1988 +\newcommand{\JPCM}{{\it J. Phys.: Condens. Matter\/} } %1989 and onwards +\newcommand{\JPD}{{\it J. Phys. D: Appl. 
Phys.} } +\newcommand{\JPE}{{\it J. Phys. E: Sci. Instrum.} } +\newcommand{\JPF}{{\it J. Phys. F: Met. Phys.} } +\newcommand{\JPG}{{\it J. Phys. G: Nucl. Phys.} } %1975--1988 +\newcommand{\jpg}{{\it J. Phys. G: Nucl. Part. Phys.} } %1989 and onwards +\newcommand{\MSMSE}{{\it Modelling Simulation Mater. Sci. Eng.} } +\newcommand{\MST}{{\it Meas. Sci. Technol.} } %1990 and onwards +\newcommand{\NET}{{\it Network: Comput. Neural Syst.} } +\newcommand{\NJP}{{\it New J. Phys.} } +\newcommand{\NL}{{\it Nonlinearity\/} } +\newcommand{\NT}{{\it Nanotechnology} } +\newcommand{\PAO}{{\it Pure Appl. Optics\/} } +\newcommand{\PM}{{\it Physiol. Meas.} } % added 4/5/93 +\newcommand{\PMB}{{\it Phys. Med. Biol.} } +\newcommand{\PPCF}{{\it Plasma Phys. Control. Fusion\/} } % added 4/5/93 +\newcommand{\PSST}{{\it Plasma Sources Sci. Technol.} } +\newcommand{\PUS}{{\it Public Understand. Sci.} } +\newcommand{\QO}{{\it Quantum Opt.} } +\newcommand{\QSO}{{\em Quantum Semiclass. Opt.} } +\newcommand{\RPP}{{\it Rep. Prog. Phys.} } +\newcommand{\SLC}{{\it Sov. Lightwave Commun.} } % added 4/5/93 +\newcommand{\SST}{{\it Semicond. Sci. Technol.} } +\newcommand{\SUST}{{\it Supercond. Sci. Technol.} } +\newcommand{\WRM}{{\it Waves Random Media\/} } +\newcommand{\JMM}{{\it J. Micromech. Microeng.\/} } +% +% Other commonly quoted journals +% +\newcommand{\AC}{{\it Acta Crystallogr.} } +\newcommand{\AM}{{\it Acta Metall.} } +\newcommand{\AP}{{\it Ann. Phys., Lpz.} } +\newcommand{\APNY}{{\it Ann. Phys., NY\/} } +\newcommand{\APP}{{\it Ann. Phys., Paris\/} } +\newcommand{\CJP}{{\it Can. J. Phys.} } +\newcommand{\JAP}{{\it J. Appl. Phys.} } +\newcommand{\JCP}{{\it J. Chem. Phys.} } +\newcommand{\JJAP}{{\it Japan. J. Appl. Phys.} } +\newcommand{\JP}{{\it J. Physique\/} } +\newcommand{\JPhCh}{{\it J. Phys. Chem.} } +\newcommand{\JMMM}{{\it J. Magn. Magn. Mater.} } +\newcommand{\JMP}{{\it J. Math. Phys.} } +\newcommand{\JOSA}{{\it J. Opt. Soc. Am.} } +\newcommand{\JPSJ}{{\it J. Phys. Soc. 
Japan\/} } +\newcommand{\JQSRT}{{\it J. Quant. Spectrosc. Radiat. Transfer\/} } +\newcommand{\NC}{{\it Nuovo Cimento\/} } +\newcommand{\NIM}{{\it Nucl. Instrum. Methods\/} } +\newcommand{\NP}{{\it Nucl. Phys.} } +\newcommand{\PL}{{\it Phys. Lett.} } +\newcommand{\PR}{{\it Phys. Rev.} } +\newcommand{\PRL}{{\it Phys. Rev. Lett.} } +\newcommand{\PRS}{{\it Proc. R. Soc.} } +\newcommand{\PS}{{\it Phys. Scr.} } +\newcommand{\PSS}{{\it Phys. Status Solidi\/} } +\newcommand{\PTRS}{{\it Phil. Trans. R. Soc.} } +\newcommand{\RMP}{{\it Rev. Mod. Phys.} } +\newcommand{\RSI}{{\it Rev. Sci. Instrum.} } +\newcommand{\SSC}{{\it Solid State Commun.} } +\newcommand{\ZP}{{\it Z. Phys.} } +%=================== +\pagestyle{headings} +\pagenumbering{arabic} +\raggedbottom +\onecolumn +\endinput +%% +%% End of file `jconf.cls'. diff --git a/contributions/sd_storm/jpconf11.clo b/contributions/sd_storm/jpconf11.clo new file mode 100644 index 0000000000000000000000000000000000000000..63541cbb98638b86bbc1df2d09f4eafbe3233a42 --- /dev/null +++ b/contributions/sd_storm/jpconf11.clo @@ -0,0 +1,141 @@ +%% +%% This is file `jpconf11.clo' +%% +%% This file is distributed in the hope that it will be useful, +%% but WITHOUT ANY WARRANTY; without even the implied warranty of +%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +\ProvidesFile{jpconf11.clo}[2005/05/04 v1.0 LaTeX2e file (size option)] +\renewcommand\normalsize{% + \@setfontsize\normalsize\@xipt{13}% + \abovedisplayskip 12\p@ \@plus3\p@ \@minus7\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \belowdisplayskip \abovedisplayskip + \let\@listi\@listI} +\normalsize +\newcommand\small{% + \@setfontsize\small\@xpt{12}% + \abovedisplayskip 11\p@ \@plus3\p@ \@minus6\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 9\p@ \@plus3\p@ \@minus5\p@ + \parsep 4.5\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip} +\newcommand\footnotesize{% +% \@setfontsize\footnotesize\@xpt\@xiipt + \@setfontsize\footnotesize\@ixpt{11}% + \abovedisplayskip 10\p@ \@plus2\p@ \@minus5\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6\p@ \@plus3\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 6\p@ \@plus2\p@ \@minus2\p@ + \parsep 3\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip +} +\newcommand\scriptsize{\@setfontsize\scriptsize\@viiipt{9.5}} +\newcommand\tiny{\@setfontsize\tiny\@vipt\@viipt} +\newcommand\large{\@setfontsize\large\@xivpt{18}} +\newcommand\Large{\@setfontsize\Large\@xviipt{22}} +\newcommand\LARGE{\@setfontsize\LARGE\@xxpt{25}} +\newcommand\huge{\@setfontsize\huge\@xxvpt{30}} +\let\Huge=\huge +\if@twocolumn + \setlength\parindent{14\p@} + \else + \setlength\parindent{18\p@} +\fi +\if@letterpaper% +%\input{letmarg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} 
+\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\else +%\input{a4marg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} +\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\fi +\setlength\maxdepth{.5\topskip} +\setlength\@maxdepth\maxdepth +\setlength\footnotesep{8.4\p@} +\setlength{\skip\footins} {10.8\p@ \@plus 4\p@ \@minus 2\p@} +\setlength\floatsep {14\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\textfloatsep {24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {16\p@ \@plus 4\p@ \@minus 4\p@} +\setlength\dblfloatsep {16\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\dbltextfloatsep{24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\@fptop{0\p@} +\setlength\@fpsep{10\p@ \@plus 1fil} +\setlength\@fpbot{0\p@} +\setlength\@dblfptop{0\p@} +\setlength\@dblfpsep{10\p@ \@plus 1fil} +\setlength\@dblfpbot{0\p@} +\setlength\partopsep{3\p@ \@plus 2\p@ \@minus 2\p@} +\def\@listI{\leftmargin\leftmargini + \parsep=\z@ + \topsep=6\p@ \@plus3\p@ \@minus3\p@ + \itemsep=3\p@ \@plus2\p@ \@minus1\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep + \topsep=3\p@ \@plus2\p@ \@minus\p@ + \parsep=\z@ + \itemsep=\parsep} +\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep=\z@ + \parsep=\z@ + \partopsep=\z@ + \itemsep=\z@} +\def\@listiv {\leftmargin\leftmarginiv + \labelwidth\leftmarginiv + \advance\labelwidth-\labelsep} +\def\@listv{\leftmargin\leftmarginv + \labelwidth\leftmarginv + \advance\labelwidth-\labelsep} +\def\@listvi {\leftmargin\leftmarginvi + 
\labelwidth\leftmarginvi + \advance\labelwidth-\labelsep} +\endinput +%% +%% End of file `iopart12.clo'. diff --git a/contributions/sd_storm/main.tex b/contributions/sd_storm/main.tex new file mode 100644 index 0000000000000000000000000000000000000000..ee6ce3e88c41fecc1dbd09ad537a132f7b208d10 --- /dev/null +++ b/contributions/sd_storm/main.tex @@ -0,0 +1,181 @@ +\documentclass[a4paper]{jpconf} + +\usepackage{url} +\usepackage{graphicx} +\usepackage{float} + +\newcommand{\quotes}[1]{``#1''} + +\begin{document} + +\title{StoRM maintenance and evolution} + +\author{ + A. Ceccanti$^1$, + E. Vianello$^1$, + F. Giacomini$^1$ +} + +\address{$^1$ INFN-CNAF, Bologna, IT} + +\ead{ + andrea.ceccanti@cnaf.infn.it +} + +\begin{abstract} + StoRM is the storage element solution that powers the CNAF Tier 1 data center as well as more than 30 other sites. In this contribution, we highlight the main maintenance and evolution activities on StoRM during 2018. +\end{abstract} + +\section*{Introduction} +\label{sec:introduction} + + +StoRM~\cite{storm} is a lightweight storage resource manager (SRM) solution developed at INFN-CNAF which powers the CNAF Tier 1 data center as well as more than 30 other sites. + +StoRM implements the SRM version 2.2~\cite{srm-2.2} data management specification and is typically deployed on top of a cluster file system like IBM GPFS~\cite{gpfs}. + +StoRM has a layered architecture (Figure~\ref{fig:storm-arch}), split between two main components: the StoRM frontend and backend services. +The StoRM frontend service implements the SRM interface exposed +to client applications and frameworks. +The StoRM backend service implements the actual storage management logic by interacting directly with the underlying file system. +Communication between the frontend and the backend happens in two ways: +\begin{itemize} + \item via an XML-RPC api, for synchronous requests; + \item via a database, for asynchronous requests. 
+\end{itemize} + +Data transfer is provided by GridFTP, HTTP and XRootD services directly accessing the file system underlying the StoRM deployment. + +StoRM is interfaced with the IBM Tivoli Storage Manager (TSM) via GEMSS~\cite{gemss}, a component also developed at INFN, to provide optimized data archiving and tape recall functionality. The StoRM WebDAV service provides an alternative data management interface complementary to the SRM functionality, but which does not yet support tape operations. +A high level representation of the StoRM architecture is given in Figure~\ref{fig:storm-arch}. + +During 2018, two StoRM releases were produced: + +\begin{itemize} + \item StoRM 1.11.13~\cite{storm-1.11.13}, released on February 19th, providing updates for the StoRM backend, YAIM module and the info provider; + \item StoRM 1.11.14~\cite{storm-1.11.14}, released on July 25th, providing updates for the frontend and backend services, StoRM native and xmlrpc libraries, the GridFTP DSI module and the YAIM module. +\end{itemize} + +The following paragraphs describe the main StoRM maintenance and evolution activities that resulted in the above releases and in pre-release packages made available to the CNAF Tier 1 and other interested sites during 2018. + +\begin{figure} + \centering + \includegraphics[width=.6\textwidth]{storm-arch.png} + \caption{\label{fig:storm-arch}The StoRM high level architecture.} +\end{figure} + +\section*{StoRM frontend stability improvements} + +After observing repeated failures that resulted in the death of the StoRM frontend process in production at Tier 1, an investigation was started +to understand the cause of the failures and provide a fix to improve +the service stability. +The failures occurred mainly when a high number of requests was observed on the frontend.
Enabling core dumping did not provide much information, besides the fact that the segfault occurred mostly in the XMLRPC serialization/deserialization logic, and was likely caused by stack corruption. What precisely caused the stack corruption, however, was not +understood. + +In order to contain the problem, the following improvements were +implemented: + +\begin{itemize} + \item a configurable limit on the size of the request queue + on the frontend was implemented; + \item information about the request queue size and the number of + active requests was added to the frontend log, in order to monitor the queue processing status in real time; + \item the logic of the XMLRPC interaction between frontend and backend + has been refactored in order to use the xmlrpc synchronous API (the former use of the asynchronous API only complicated the code base without providing increased concurrency or throughput); + \item a configurable limit on the size of the threadpool serving XMLRPC requests has been introduced on the backend; + \item a configurable limit on the size of the queue of the XMLRPC requests has been introduced on the backend; + \item our load test suite was tuned to generate a load comparable with + the one observed in production for the ATLAS experiment. +\end{itemize} + +These improvements, and appropriate configuration, restored the frontend +service stability: no more crashes were observed in production, even during peak load periods. + +\section*{JSON storage usage record reporting} + +In consultation with all the LHC experiments, the WLCG storage providers (dCache~\cite{dcache}, DPM~\cite{dpm}, EOS~\cite{eos}, StoRM, XRootD~\cite{xrootd}) drafted a proposal for storage resource reporting in WLCG~\cite{storage-resource-reporting-proposal}.
+This document proposes five requirements: +\begin{itemize} + \item \texttt{R0}: storage systems should provide the total used space and the list of files stored (no other meta-data required); + \item \texttt{R1}: storage systems should provide the total used and total free space for all distinct space quotas available to the experiment through a non-SRM protocol (GridFTP, HTTP or XRootD) and with ten minutes as data freshness order and tens of GB as volume accuracy; + \item \texttt{R2}: storage systems should provide a public summary file indicating the ``topology'' of the system and usage information; + \item \texttt{R3}: storage systems should provide the total used and total free space on sub-directories, in particular any entity on which a restrictive quota has been applied; + \item \texttt{R4}: storage systems should provide a full storage dump with file information such as size, creation time and check-sum value. +\end{itemize} + +Requirement \texttt{R3} has been withdrawn as no experiment supported its inclusion. +Requirement \texttt{R4} was already supported through WebDAV with a detailed and recursive \texttt{PROPFIND} operation. + +In order to comply with the requirements \texttt{R0}, \texttt{R1} and \texttt{R2}, the following improvements were introduced in February 2018: +\begin{itemize} + \item the backend REST endpoint used to generate the list of configured storage areas and their usage status now produces a JSON response instead of plain text; + \item a new command, \texttt{get-report-json}, has been added to the info provider to generate a JSON site report file, with a configurable target location. +\end{itemize} + +To fulfill requirement \texttt{R2}, the Tier 1 StoRM ATLAS production instance has been configured to expose, +via the StoRM WebDAV service, the JSON usage report in a storage area accessible by any client presenting a trusted X.509 certificate.
+ +\section*{Backend improved starting logic} + +Aiming to improve the StoRM Backend service starting logic, important re-factoring work has been done on the start-up source code and on the \texttt{init.d} scripts. +Before the re-factoring, each start-up of the StoRM Backend service was divided into several running processes, making even killing the service more complex than necessary. +After the re-factoring only one process can be seen from the command line and all the useless arguments have been removed. That has also meant a relevant improvement of the service start-up speed. + +Comparing the start-up before the re-factoring: + +\begin{verbatim} +$ time sh start-storm.sh +Bootstrapping storm-backend-server [ OK ] +Starting storm-backend-server [ OK ] + +real 0m20.495s +user 0m0.122s +sys 0m0.140s +\end{verbatim} + +and after: + +\begin{verbatim} +$ time sh start-storm.sh +Starting storm-backend-server: [ OK ] + +real 0m5.217s +user 0m0.083s +sys 0m0.078s +\end{verbatim} + +we can see that the new boot speed is about 4 times faster. + +\section*{WebDAV third-party transfers support} + +At the end of May 2017 the Globus Alliance announced that the Open source +Globus toolkit would no longer be supported by the Globus team at the University of Chicago~\cite{globus-end-of-support}. This announcement had an obvious impact on WLCG, since the Globus Security Infrastructure (GSI) and GridFTP lie at the core of the WLCG data management infrastructure, and discussions started in the appropriate forums on the search for alternatives. The DOMA Third party copy Working Group~\cite{doma-tpc} was established to investigate alternatives to the GridFTP protocol for bulk transfers across WLCG sites. This led to a requirement for all storage element implementations to support either WebDAV-based or XrootD-based third-party transfers.
+ +In order to comply with the requirement, the following improvements were introduced in the StoRM WebDAV service in November 2018: +\begin{itemize} + \item The WebDAV service was migrated to the latest stable Spring boot libraries~\cite{spring-boot}; + \item Token-based delegation and authorization was introduced, by adding support for external OpenID Connect~\cite{oidc} providers and by introducing an internal OAuth~\cite{oauth} authorization server that can be used to issue tokens to clients authenticated with VOMS credentials; + \item the semantics of the WebDAV \texttt{COPY} method was extended to implement third-party transfers; + \item a significant refactoring of the robot test suite was implemented, by moving the test suite code into the server repository and simplifying credential management. The refactoring resulted in improved usability, performance and error reporting. +\end{itemize} + +A pre-release package of the updated StoRM WebDAV package was +deployed at CNAF Tier 1 for the ATLAS WebDAV production instance and added successfully to the DOMA TPC testbed where it proved to work reliably. + +The initial deployment also highlighted minor issues which were solved, and led to the final StoRM WebDAV 1.1.0 release in February 2019. + +\begin{figure} + \centering + \includegraphics[width=.6\textwidth]{tpc.png} + \caption{\label{fig:tpc}A WebDAV push-mode third-party transfer managed by CERN File Transfer Service (FTS) against two storage elements.} +\end{figure} + +\section*{Conclusions and future work} +In this contribution, we presented the main development and evolution activities performed on StoRM during 2018. Besides ordinary maintenance, in 2019 we will focus on porting StoRM 1 +to CENTOS 7 and on replacing the current YAIM-based configuration code~\cite{yaim} with a Puppet module~\cite{puppet}.
+\section*{References} + +\bibliographystyle{iopart-num} +\bibliography{biblio} + +\end{document} diff --git a/contributions/sd_storm/storm-arch.png b/contributions/sd_storm/storm-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..75571801d055082e18c5b04b20d9ecaf84801e1f Binary files /dev/null and b/contributions/sd_storm/storm-arch.png differ diff --git a/contributions/sd_storm/tpc.png b/contributions/sd_storm/tpc.png new file mode 100644 index 0000000000000000000000000000000000000000..2cbb95038c4f1351416aa6a55d6eaece9673e8d8 Binary files /dev/null and b/contributions/sd_storm/tpc.png differ diff --git a/contributions/sd_storm2/.gitignore b/contributions/sd_storm2/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6e81cdec19ae94fef2c32d333d227017f785ea77 --- /dev/null +++ b/contributions/sd_storm2/.gitignore @@ -0,0 +1,6 @@ +main.aux +main.bbl +main.blg +main.log +main.pdf + diff --git a/contributions/sd_storm2/biblio.bib b/contributions/sd_storm2/biblio.bib new file mode 100644 index 0000000000000000000000000000000000000000..2e959e37f5312d2890edb25f690129730dbeeb91 --- /dev/null +++ b/contributions/sd_storm2/biblio.bib @@ -0,0 +1,529 @@ +@misc{ref:spring, + title = {{The Spring framework}}, + note = {\url{https://spring.io/}} +} +@misc{ref:keep-a-changelog, + title = {{Keep a Changelog}}, + note = {\url{https://keepachangelog.com/en/1.0.0/}} +} +@inproceedings{ref:gemss, + title={{The Grid Enabled Mass Storage System (GEMSS): the Storage and Data management system used at the INFN Tier1 at CNAF}}, + author={Ricci, Pier Paolo and Bonacorsi, Daniele and Cavalli, Alessandro and dell'Agnello, Luca and Gregori, Daniele and Prosperini, Andrea and Rinaldi, Lorenzo and Sapunenko, Vladimir and Vagnoni, Vincenzo}, + booktitle={Journal of Physics: Conference Series}, + volume={396}, + pages={042051}, + year={2012}, + organization={IOP Publishing} +} + +@misc{ref:srm, + title = {{The Storage Resource Manager Interface 
Specification,Version 2.2}}, + year = 2009, + note = {\url{https://sdm.lbl.gov/srm-wg/doc/SRM.v2.2.html}} +} + +@misc{ref:webdav, + title = {{HTTP Extensions for Web Distributed Authoring and Versioning (WebDAV)}}, + year = 2007, + note = {\url{https://tools.ietf.org/html/rfc4918}} +} + +@misc{ref:gitflow, + title = {{Gitflow Workflow}}, + note = {\url{https://it.atlassian.com/git/tutorials/comparing-workflows/gitflow-workflow}}, + urldate = {2019-07-08} +} + +@misc{ref:grpc, + title = {{gRPC}}, + note = {\url{https://grpc.io}}, + urldate= {2019-07-05} +} + +@misc{ref:protocol-buffers, + title = {{Protocol Buffers}}, + note = {\url{https://developers.google.com/protocol-buffers/}}, + urldate= {2019-07-05} +} +@misc{ref:rf, + title = {{Robot Framework}}, + note = {\url{https://robotframework.org}}, + urldate = {2019-07-05} +} + +@misc{ref:docker, + title = {{Enterprise Container Platform for High-Velocity Innovation}}, + note = {\url{https://www.docker.com}}, + urldate = {2019-07-08} +} + +@misc{ref:dc, + title = {{Docker Compose}}, + note = {\url{https://docs.docker.com/compose}}, + urldate = {2019-07-08} +} + + +@misc{ref:glcip, + title = {{Creating and using CI/CD pipelines}}, + note = {\url{https://docs.gitlab.com/ee/ci/pipelines.html}}, + urldate = {2019-07-08} +} + + +@misc{p1003.1e, + title = {{POSIX Access Control Lists, IEEE Draft P1003.1e}}, + year = 1997 +} + +@InCollection{ref:nginx-voms, + author = {Ceccanti, Andrea and Giacomini, Francesco and Ronchieri, Elisabetta and Terranova, Nicholas}, + title = {{A VOMS module for the Nginx web server}}, + booktitle = {2018 CNAF Annual Report}, + publisher = {INFN-CNAF}, + year = 2019, + note = {\url{https://www.cnaf.infn.it/annual-report}}} + +@Misc{ref:boost.fs, + title = {{Boost Filesystem}}, + note = {\url{https://www.boost.org/doc/libs/release/libs/filesystem}}} + + +@Misc{ref:boost.log, + author = {Semashev, Andrey}, + title = {{Boost Log}}, + note = {\url{https://www.boost.org/doc/libs/release/libs/log}}} 
+ +@Misc{ref:yaml-cpp, + author = {Beder, Jesse}, + title = {yaml-cpp}, + note = {\url{https://github.com/jbeder/yaml-cpp}}} + +@techreport{jwt, + author = {Michael B. Jones and John Bradley and Nat Sakimura}, + title = {{The JSON Web Token RFC}}, + type = {RFC}, + number = 7519, + year = {2015}, + month = {May}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc7519.txt} +} + +@techreport{oauth, + author = {Dick Hardt}, + title = {{The OAuth 2.0 Authorization Framework}}, + type = {RFC}, + number = 6749, + year = {2012}, + month = {October}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc6749.txt} +} + +@techreport{oauth-token-exchange, + author = {Michael B. Jones and Anthony Nadalin and Brian Campbell + and John Bradley and Chuck Mortimore}, + title = {{OAuth 2.0 Token Exchange}}, + type = {Internet-Draft}, + number = "draft-ietf-oauth-token-exchange-16.txt", + year = {2019}, + month = {April}, + day = {22}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/id/draft-ietf-oauth-token-exchange-16.txt} +} + +@techreport{oauth-metadata, + author = {Michael B. Jones and Nat Sakimura and John Bradley}, + title = {{OAuth 2.0 Authorization Server Metadata}}, + type = {RFC}, + number = 8414, + year = {2018}, + month = {June}, + issn = {2070-1721}, + publisher = {IETF Tools}, + institution = {IETF Tools}, + url = {https://tools.ietf.org/rfc/rfc8414.txt} +} + +@misc{oidc, + author = {{OpenID Foundation}}, + title = {{The OpenID Connect identity layer}}, + year = 2018, + note = {\url{https://openid.net/connect/}}, + urldate = {2018-12-03} +} + +@misc{oidc-discovery, + author = {{Nat Sakimura and John Bradley and Michael B. 
Jones and Edmund Jay}}, + title = {{The OpenID Connect discovery specification}}, + year = 2014, + note = {\url{https://openid.net/specs/openid-connect-discovery-1_0.html}}, + urldate = {2018-12-03} +} + +@misc{igtf, + title = {{The Interoperable Global Trust Federation}}, + note = {\url{https://www.igtf.net/}}, + urldate = {2018-12-03} +} + +@misc{x509, + title = {{X.509}}, + note = {\url{https://en.wikipedia.org/wiki/X.509}}, + urldate = {2018-12-03} +} + +@article{GSI, + author = {Von Welch and + Frank Siebenlist and + Ian T. Foster and + John Bresnahan and + Karl Czajkowski and + Jarek Gawor and + Carl Kesselman and + Sam Meder and + Laura Pearlman and + Steven Tuecke}, + title = {Security for Grid Services}, + journal = {CoRR}, + volume = {cs.CR/0306129}, + year = {2003}, + url = {http://arxiv.org/abs/cs.CR/0306129}, + timestamp = {Mon, 13 Aug 2018 16:49:07 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/cs-CR-0306129}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@software{VOMS, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The Virtual Organisation Membership Service}}, + doi = {10.5281/zenodo.1875371}, + url = {https://doi.org/10.5281/zenodo.1875371} +} + +@misc{edugain, + title = {{eduGAIN interfederation website}}, + note = {\url{http://www.geant.org/Services/Trust_identity_and_security/eduGAIN}}, + urldate = {2018-12-03} +} + +@misc{google, + title = {{The Google Identity Platform}}, + note = {\url{https://developers.google.com/identity/}}, + urldate = {2018-12-03} +} + +@misc{scim, + title = {{The System for Cross Domain Identity Management website}}, + note = {\url{http://www.simplecloud.info/}}, + urldate = {2018-12-03} +} + +@article{indigo-aai-chep2016, + author={Andrea Ceccanti and Marcus Hardt and Bas Wegh and A. 
Paul Millar + and Marco Caberletti and Enrico Vianello and Slavek Licehammer}, + title={{The INDIGO-Datacloud Authentication and Authorization Infrastructure}}, + journal={Journal of Physics: Conference Series}, + volume={898}, + number={10}, + pages={102016}, + url={http://iopscience.iop.org/article/10.1088/1742-6596/898/10/102016}, + year={2017} +} + + +@software{iam, + author = {Andrea Ceccanti and Enrico Vianello and Marco Caberletti}, + title = {{INDIGO Identity and Access Management (IAM)}}, + doi = {10.5281/zenodo.1874790}, + url = {https://doi.org/10.5281/zenodo.1874790} +} + + +@software{voms-admin, + author = {Andrea Ceccanti}, + title = {{The VOMS administration service}}, + doi = {10.5281/zenodo.1875616}, + url = {https://doi.org/10.5281/zenodo.1875616} +} + +@misc{cwp, +Author = {{HEP Software Foundation} and Johannes Albrecht and Antonio + Augusto {Alves} Jr and Guilherme Amadio and Giuseppe Andronico and Nguyen + Anh-Ky and Laurent Aphecetche and John Apostolakis and Makoto Asai and Luca + Atzori and Marian Babik and Giuseppe Bagliesi and Marilena Bandieramonte + and Sunanda Banerjee and Martin Barisits and Lothar A. T. 
Bauerdick and + Stefano Belforte and Douglas Benjamin and Catrin Bernius and Wahid Bhimji + and Riccardo Maria Bianchi and Ian Bird and Catherine Biscarat and Jakob + Blomer and Kenneth Bloom and Tommaso Boccali and Brian Bockelman and Tomasz + Bold and Daniele Bonacorsi and Antonio Boveia and Concezio Bozzi and Marko + Bracko and David Britton and Andy Buckley and Predrag Buncic and Paolo + Calafiura and Simone Campana and Philippe Canal and Luca Canali and + Gianpaolo Carlino and Nuno Castro and Marco Cattaneo and Gianluca Cerminara + and Javier Cervantes Villanueva and Philip Chang and John Chapman and Gang + Chen and Taylor Childers and Peter Clarke and Marco Clemencic and Eric + Cogneras and Jeremy Coles and Ian Collier and David Colling and Gloria + Corti and Gabriele Cosmo and Davide Costanzo and Ben Couturier and Kyle + Cranmer and Jack Cranshaw and Leonardo Cristella and David Crooks and + Sabine Crépé-Renaudin and Robert Currie and Sünje Dallmeier-Tiessen and + Kaushik De and Michel De Cian and Albert De Roeck and Antonio Delgado Peris + and Frédéric Derue and Alessandro Di Girolamo and Salvatore Di Guida and + Gancho Dimitrov and Caterina Doglioni and Andrea Dotti and Dirk Duellmann + and Laurent Duflot and Dave Dykstra and Katarzyna Dziedziniewicz-Wojcik and + Agnieszka Dziurda and Ulrik Egede and Peter Elmer and Johannes Elmsheuser + and V. Daniel Elvira and Giulio Eulisse and Steven Farrell and Torben + Ferber and Andrej Filipcic and Ian Fisk and Conor Fitzpatrick and José Flix + and Andrea Formica and Alessandra Forti and Giovanni Franzoni and James + Frost and Stu Fuess and Frank Gaede and Gerardo Ganis and Robert Gardner + and Vincent Garonne and Andreas Gellrich and Krzysztof Genser and Simon + George and Frank Geurts and Andrei Gheata and Mihaela Gheata and Francesco + Giacomini and Stefano Giagu and Manuel Giffels and Douglas Gingrich and + Maria Girone and Vladimir V. 
Gligorov and Ivan Glushkov and Wesley Gohn and + Jose Benito Gonzalez Lopez and Isidro González Caballero and Juan R. + González Fernández and Giacomo Govi and Claudio Grandi and Hadrien Grasland + and Heather Gray and Lucia Grillo and Wen Guan and Oliver Gutsche and + Vardan Gyurjyan and Andrew Hanushevsky and Farah Hariri and Thomas Hartmann + and John Harvey and Thomas Hauth and Benedikt Hegner and Beate Heinemann + and Lukas Heinrich and Andreas Heiss and José M. Hernández and Michael + Hildreth and Mark Hodgkinson and Stefan Hoeche and Burt Holzman and Peter + Hristov and Xingtao Huang and Vladimir N. Ivanchenko and Todor Ivanov and + Jan Iven and Brij Jashal and Bodhitha Jayatilaka and Roger Jones and Michel + Jouvin and Soon Yung Jun and Michael Kagan and Charles William Kalderon and + Meghan Kane and Edward Karavakis and Daniel S. Katz and Dorian Kcira and + Oliver Keeble and Borut Paul Kersevan and Michael Kirby and Alexei + Klimentov and Markus Klute and Ilya Komarov and Dmitri Konstantinov and + Patrick Koppenburg and Jim Kowalkowski and Luke Kreczko and Thomas Kuhr and + Robert Kutschke and Valentin Kuznetsov and Walter Lampl and Eric Lancon and + David Lange and Mario Lassnig and Paul Laycock and Charles Leggett and + James Letts and Birgit Lewendel and Teng Li and Guilherme Lima and Jacob + Linacre and Tomas Linden and Miron Livny and Giuseppe Lo Presti and + Sebastian Lopienski and Peter Love and Adam Lyon and Nicolò Magini and + Zachary L. Marshall and Edoardo Martelli and Stewart Martin-Haugh and Pere + Mato and Kajari Mazumdar and Thomas McCauley and Josh McFayden and Shawn + McKee and Andrew McNab and Rashid Mehdiyev and Helge Meinhard and Dario + Menasce and Patricia Mendez Lorenzo and Alaettin Serhan Mete and Michele + Michelotto and Jovan Mitrevski and Lorenzo Moneta and Ben Morgan and + Richard Mount and Edward Moyse and Sean Murray and Armin Nairz and Mark S. 
+ Neubauer and Andrew Norman and Sérgio Novaes and Mihaly Novak and Arantza + Oyanguren and Nurcan Ozturk and Andres Pacheco Pages and Michela Paganini + and Jerome Pansanel and Vincent R. Pascuzzi and Glenn Patrick and Alex + Pearce and Ben Pearson and Kevin Pedro and Gabriel Perdue and Antonio + Perez-Calero Yzquierdo and Luca Perrozzi and Troels Petersen and Marko + Petric and Andreas Petzold and Jónatan Piedra and Leo Piilonen and Danilo + Piparo and Jim Pivarski and Witold Pokorski and Francesco Polci and Karolos + Potamianos and Fernanda Psihas and Albert Puig Navarro and Günter Quast and + Gerhard Raven and Jürgen Reuter and Alberto Ribon and Lorenzo Rinaldi and + Martin Ritter and James Robinson and Eduardo Rodrigues and Stefan Roiser + and David Rousseau and Gareth Roy and Grigori Rybkine and Andre Sailer and + Tai Sakuma and Renato Santana and Andrea Sartirana and Heidi Schellman and + Jaroslava Schovancová and Steven Schramm and Markus Schulz and Andrea + Sciabà and Sally Seidel and Sezen Sekmen and Cedric Serfon and Horst + Severini and Elizabeth Sexton-Kennedy and Michael Seymour and Davide + Sgalaberna and Illya Shapoval and Jamie Shiers and Jing-Ge Shiu and Hannah + Short and Gian Piero Siroli and Sam Skipsey and Tim Smith and Scott Snyder + and Michael D. Sokoloff and Panagiotis Spentzouris and Hartmut Stadie and + Giordon Stark and Gordon Stewart and Graeme A. Stewart and Arturo Sánchez + and Alberto Sánchez-Hernández and Anyes Taffard and Umberto Tamponi and + Jeff Templon and Giacomo Tenaglia and Vakhtang Tsulaia and Christopher + Tunnell and Eric Vaandering and Andrea Valassi and Sofia Vallecorsa and + Liviu Valsan and Peter Van Gemmeren and Renaud Vernet and Brett Viren and + Jean-Roch Vlimant and Christian Voss and Margaret Votava and Carl Vuosalo + and Carlos Vázquez Sierra and Romain Wartel and Gordon T. 
Watts and Torre + Wenaus and Sandro Wenzel and Mike Williams and Frank Winklmeier and + Christoph Wissing and Frank Wuerthwein and Benjamin Wynne and Zhang Xiaomei + and Wei Yang and Efe Yazgan}, Title = {{A Roadmap for HEP Software and + Computing R\&D for the 2020s}}, Year = {2017}, Eprint = {arXiv:1712.06982}, +} + +@misc{scitokens, + title = {{The SciTokens project}}, + note = {\url{https://scitokens.org}}, + urldate = {2018-12-03} +} + +@misc{kubernetes, + title = {{The Kubernetes container orchestrator}}, + note = {\url{https://kubernetes.io}}, + urldate = {2018-12-03} +} + +@misc{openstack, + title = {{The Openstack IAAS framework}}, + note = {\url{https://www.openstack.org}}, + urldate = {2018-12-03} +} + +@misc{fts, + title = {{The CERN File Transfer Service}}, + note = {\url{https://fts.web.cern.ch}}, + urldate = {2018-12-03} +} + +@misc{storm, + title = {{The StoRM storage element}}, + note = {\url{https://italiangrid.github.io/storm}}, + urldate = {2018-12-03} +} + +@misc{dcache, + title = {{The dCache storage solution}}, + note = {\url{https://dcache.org}}, + urldate = {2018-12-03} +} + +@misc{oidc-rande, + title = {{The OpenID Research \& Education working group}}, + note = {\url{https://openid.net/wg/rande}}, + urldate = {2018-12-03} +} + +@techreport{voms-ac-format, + author = {Vincenzo Ciaschini and Valerio Venturi and Andrea Ceccanti}, + title = {{The VOMS Attribute Certificate format }}, + year = {2011}, + month = {August}, + publisher = {Open Grid Forum}, + institution = {Open Grid Forum}, + url = {https://www.ogf.org/documents/GFD.182.pdf} +} + +@misc{aarc-blueprint, + title = {{The AARC Blueprint Architecture}}, + note = {\url{https://aarc-project.eu/architecture}}, + urldate = {2018-12-03} +} + +@misc{rcauth-ssh, + title = {{RCAuth.eu: getting proxies using SSH key AuthN}}, + author = {Mischa Sall\'e}, + note = {\url{https://indico.cern.ch/event/669715/contributions/2739035/attachments/1532101/2398499/RCauth_SSH_wlcg_authz_wg.pdf}}, + urldate 
= {2018-12-03} +} + +@misc{oauth4myproxy, + title = {{OAuth for MyProxy}}, + note = {\url{http://grid.ncsa.illinois.edu/myproxy/oauth/}}, + urldate = {2019-03-18} +} + +@misc{rcauth, + title = {{The RCAuth online CA}}, + note = {\url{https://rcauth.eu}}, + urldate = {2018-12-03} +} + +@misc{dodas, + title = {{Dynamic On Demand Analysis Service: DODAS}}, + note = {\url{https://dodas-ts.github.io/dodas-doc}}, + urldate = {2018-12-03} +} + +@misc{eosc-hub, + title = {{The EOSC-Hub project}}, + note = {\url{https://www.eosc-hub.eu}}, + urldate = {2018-12-03} +} + +@misc{aarc, + title = {{The AARC project}}, + note = {\url{https://aarc-project.eu}}, + urldate = {2018-12-03} +} + +@misc{fim4r, + title = {{Federated Identity Management for Research}}, + note = {\url{https://fim4r.org}}, + urldate = {2018-12-03} +} + +@misc{wlcg-authz-wg, + title = {{The WLCG Authorization Working Group}}, + note = {\url{https://twiki.cern.ch/twiki/bin/view/LCG/WLCGAuthorizationWG}}, + urldate = {2018-12-03} +} + +@misc{nikhef, + title = {{The Dutch National Institute for Sub-atomic Physics}}, + note = {\url{https://www.nikhef.nl}}, + urldate = {2019-05-10} +} + +@misc{indigo-datacloud, + Author = {INDIGO-DataCloud Collaboration and Davide Salomoni and Isabel + Campos and Luciano Gaido and Jesus Marco de Lucas and Peter Solagna and Jorge + Gomes and Ludek Matyska and Patrick Fuhrman and Marcus Hardt and Giacinto + Donvito and Lukasz Dutka and Marcin Plociennik and Roberto Barbera and + Ignacio Blanquer and Andrea Ceccanti and Mario David and Cristina Duma and + Alvaro López-García and Germán Moltó and Pablo Orviz and Zdenek Sustr and + Matthew Viljoen and Fernando Aguilar and Luis Alves and Marica Antonacci + and Lucio Angelo Antonelli and Stefano Bagnasco and Alexandre M. J. J.
+ Bonvin and Riccardo Bruno and Eva Cetinic and Yin Chen and Alessandro Costa + and Davor Davidovic and Benjamin Ertl and Marco Fargetta and Sandro Fiore + and Stefano Gallozzi and Zeynep Kurkcuoglu and Lara Lloret and Joao Martins + and Alessandra Nuzzo and Paola Nassisi and Cosimo Palazzo and Joao Pina and + Eva Sciacca and Daniele Spiga and Marco Antonio Tangaro and Michal Urbaniak + and Sara Vallero and Bas Wegh and Valentina Zaccolo and Federico Zambelli + and Tomasz Zok}, + Title = {{INDIGO-DataCloud: A data and computing platform to facilitate seamless + access to e-infrastructures}}, + Year = {2017}, + Eprint = {arXiv:1711.01981}, +} + +@misc{kubernetes-labels, + title = {{Kubernetes labels and selectors}}, + note = {\url{https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/}}, + urldate = {2018-12-03} +} + +@misc{spid, + title = {{Sistema Pubblico di Identità Digitale}}, + note = {\url{https://www.spid.gov.it}}, + urldate = {2018-12-03} +} + +@misc{hr-db-api-service, + title = {{CERN HR DB API service}}, + note = {\url{https://baltig.infn.it/aceccant/cern-hr-db-service}}, + urldate = {2018-12-03} +} + +@misc{cern-openshift, + title = {{CERN Openshift PAAS infrastructure}}, + note = {\url{http://information-technology.web.cern.ch/services/PaaS-Web-App}}, + urldate = {2018-12-03} +} +@misc{keycloak, + title = {{The Keycloak Identity and Access Management system}}, + note = {\url{https://www.keycloak.org/}}, + urldate = {2018-12-03} +} + +@inproceedings{cern-sso, + doi = {10.1088/1742-6596/119/8/082008}, + url = {https://doi.org/10.1088%2F1742-6596%2F119%2F8%2F082008}, + year = 2008, + volume = {119}, + number = {8}, + pages = {082008}, + author = {E Ormancey}, + title = {{CERN} single sign on solution}, + booktitle = {Journal of Physics: Conference Series} +} + +@inproceedings{voms-convergence, + author={Andrea Ceccanti and Vincenzo Ciaschini and Maria Dimou and Gabriele Garzoglio and
Tanya Levshina and Steve Traylen and Valerio Venturi}, + title={{VOMS/VOMRS utilization patterns and convergence plan}}, + booktitle={Journal of Physics: Conference Series}, + volume={219}, + number={6}, + pages={062006}, + url={http://stacks.iop.org/1742-6596/219/i=6/a=062006}, + year={2010} +} diff --git a/contributions/sd_storm2/high-level-arch.png b/contributions/sd_storm2/high-level-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..4e6a14b077841497bd8cd62943e1d3f54a6ed6a9 Binary files /dev/null and b/contributions/sd_storm2/high-level-arch.png differ diff --git a/contributions/sd_storm2/iopams.sty b/contributions/sd_storm2/iopams.sty new file mode 100644 index 0000000000000000000000000000000000000000..044dde929745d48d13601b572a0f586728ebf0a4 --- /dev/null +++ b/contributions/sd_storm2/iopams.sty @@ -0,0 +1,87 @@ +%% +%% This is file `iopams.sty' +%% File to include AMS fonts and extra definitions for bold greek +%% characters for use with iopart.cls +%% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{iopams}[1997/02/13 v1.0] +\RequirePackage{amsgen}[1995/01/01] +\RequirePackage{amsfonts}[1995/01/01] +\RequirePackage{amssymb}[1995/01/01] +\RequirePackage{amsbsy}[1995/01/01] +% +\iopamstrue % \newif\ifiopams in iopart.cls & iopbk2e.cls +% % allows optional text to be in author guidelines +% +% Bold lower case Greek letters +% +\newcommand{\balpha}{\boldsymbol{\alpha}} +\newcommand{\bbeta}{\boldsymbol{\beta}} +\newcommand{\bgamma}{\boldsymbol{\gamma}} +\newcommand{\bdelta}{\boldsymbol{\delta}} +\newcommand{\bepsilon}{\boldsymbol{\epsilon}} +\newcommand{\bzeta}{\boldsymbol{\zeta}} +\newcommand{\bfeta}{\boldsymbol{\eta}} +\newcommand{\btheta}{\boldsymbol{\theta}} +\newcommand{\biota}{\boldsymbol{\iota}} +\newcommand{\bkappa}{\boldsymbol{\kappa}} +\newcommand{\blambda}{\boldsymbol{\lambda}} +\newcommand{\bmu}{\boldsymbol{\mu}} +\newcommand{\bnu}{\boldsymbol{\nu}} +\newcommand{\bxi}{\boldsymbol{\xi}} +\newcommand{\bpi}{\boldsymbol{\pi}} 
+\newcommand{\brho}{\boldsymbol{\rho}} +\newcommand{\bsigma}{\boldsymbol{\sigma}} +\newcommand{\btau}{\boldsymbol{\tau}} +\newcommand{\bupsilon}{\boldsymbol{\upsilon}} +\newcommand{\bphi}{\boldsymbol{\phi}} +\newcommand{\bchi}{\boldsymbol{\chi}} +\newcommand{\bpsi}{\boldsymbol{\psi}} +\newcommand{\bomega}{\boldsymbol{\omega}} +\newcommand{\bvarepsilon}{\boldsymbol{\varepsilon}} +\newcommand{\bvartheta}{\boldsymbol{\vartheta}} +\newcommand{\bvaromega}{\boldsymbol{\varomega}} +\newcommand{\bvarrho}{\boldsymbol{\varrho}} +\newcommand{\bvarzeta}{\boldsymbol{\varsigma}} %NB really sigma +\newcommand{\bvarsigma}{\boldsymbol{\varsigma}} +\newcommand{\bvarphi}{\boldsymbol{\varphi}} +% +% Bold upright capital Greek letters +% +\newcommand{\bGamma}{\boldsymbol{\Gamma}} +\newcommand{\bDelta}{\boldsymbol{\Delta}} +\newcommand{\bTheta}{\boldsymbol{\Theta}} +\newcommand{\bLambda}{\boldsymbol{\Lambda}} +\newcommand{\bXi}{\boldsymbol{\Xi}} +\newcommand{\bPi}{\boldsymbol{\Pi}} +\newcommand{\bSigma}{\boldsymbol{\Sigma}} +\newcommand{\bUpsilon}{\boldsymbol{\Upsilon}} +\newcommand{\bPhi}{\boldsymbol{\Phi}} +\newcommand{\bPsi}{\boldsymbol{\Psi}} +\newcommand{\bOmega}{\boldsymbol{\Omega}} +% +% Bold versions of miscellaneous symbols +% +\newcommand{\bpartial}{\boldsymbol{\partial}} +\newcommand{\bell}{\boldsymbol{\ell}} +\newcommand{\bimath}{\boldsymbol{\imath}} +\newcommand{\bjmath}{\boldsymbol{\jmath}} +\newcommand{\binfty}{\boldsymbol{\infty}} +\newcommand{\bnabla}{\boldsymbol{\nabla}} +\newcommand{\bdot}{\boldsymbol{\cdot}} +% +% Symbols for caption +% +\renewcommand{\opensquare}{\mbox{$\square$}} +\renewcommand{\opentriangle}{\mbox{$\vartriangle$}} +\renewcommand{\opentriangledown}{\mbox{$\triangledown$}} +\renewcommand{\opendiamond}{\mbox{$\lozenge$}} +\renewcommand{\fullsquare}{\mbox{$\blacksquare$}} +\newcommand{\fulldiamond}{\mbox{$\blacklozenge$}} +\newcommand{\fullstar}{\mbox{$\bigstar$}} +\newcommand{\fulltriangle}{\mbox{$\blacktriangle$}} 
+\newcommand{\fulltriangledown}{\mbox{$\blacktriangledown$}} + +\endinput +%% +%% End of file `iopams.sty'. diff --git a/contributions/sd_storm2/jpconf.cls b/contributions/sd_storm2/jpconf.cls new file mode 100644 index 0000000000000000000000000000000000000000..09f509fdcfde0543cfbc37e4f64c02e11d9b4972 --- /dev/null +++ b/contributions/sd_storm2/jpconf.cls @@ -0,0 +1,957 @@ +\NeedsTeXFormat{LaTeX2e}[1995/12/01] +\ProvidesClass{jpconf} + [2007/03/07 v1.1 + LaTeX class for Journal of Physics: Conference Series] +%\RequirePackage{graphicx} +\newcommand\@ptsize{1} +\newif\if@restonecol +\newif\if@letterpaper +\newif\if@titlepage +\newif\ifiopams +\@titlepagefalse +\@letterpaperfalse +\DeclareOption{a4paper} + {\setlength\paperheight {297mm}% + \setlength\paperwidth {210mm}% +\@letterpaperfalse} +\DeclareOption{letterpaper} + {\setlength\paperheight {279.4mm}% + \setlength\paperwidth {215.9mm}% +\@letterpapertrue} +\DeclareOption{landscape} + {\setlength\@tempdima {\paperheight}% + \setlength\paperheight {\paperwidth}% + \setlength\paperwidth {\@tempdima}} +\DeclareOption{twoside}{\@twosidetrue \@mparswitchtrue} +\renewcommand\@ptsize{1} +%\ExecuteOptions{A4paper, twoside} +\ExecuteOptions{A4paper} +\ProcessOptions +\DeclareMathAlphabet{\bi}{OML}{cmm}{b}{it} +\DeclareMathAlphabet{\bcal}{OMS}{cmsy}{b}{n} +\input{jpconf1\@ptsize.clo} +\setlength\lineskip{1\p@} +\setlength\normallineskip{1\p@} +\renewcommand\baselinestretch{} +\setlength\parskip{0\p@ \@plus \p@} +\@lowpenalty 51 +\@medpenalty 151 +\@highpenalty 301 +\setlength\parindent{5mm} +\setcounter{topnumber}{8} +\renewcommand\topfraction{1} +\setcounter{bottomnumber}{3} +\renewcommand\bottomfraction{.99} +\setcounter{totalnumber}{8} +\renewcommand\textfraction{0.01} +\renewcommand\floatpagefraction{.8} +\setcounter{dbltopnumber}{6} +\renewcommand\dbltopfraction{1} +\renewcommand\dblfloatpagefraction{.8} +\renewcommand{\title}{\@ifnextchar[{\@stitle}{\@ftitle}} +\pretolerance=5000 +\tolerance=8000 +% Headings for all 
pages apart from first +% +\def\ps@headings{% + \let\@oddfoot\@empty + \let\@evenfoot\@empty + \let\@oddhead\@empty + \let\@evenhead\@empty + %\def\@evenhead{\thepage\hfil\itshape\rightmark}% + %\def\@oddhead{{\itshape\leftmark}\hfil\thepage}% + %\def\@evenhead{{\itshape Journal of Physics: Conference Series}\hfill}% + %\def\@oddhead{\hfill {\itshape Journal of Physics: Conference Series}}%% + \let\@mkboth\markboth + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +% Headings for first page +% +\def\ps@myheadings{\let\@oddfoot\@empty\let\@evenfoot\@empty + \let\@oddhead\@empty\let\@evenhead\@empty + \let\@mkboth\@gobbletwo + \let\sectionmark\@gobble + \let\subsectionmark\@gobble} +% +\def\@stitle[#1]#2{\markboth{#1}{#1}% + %\pagestyle{empty}% + \thispagestyle{myheadings} + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large +\fontsize{18bp}{24bp}\selectfont\bf\raggedright\noindent#2\par}} +\def\@ftitle#1{\markboth{#1}{#1}% + \thispagestyle{myheadings} +%\pagestyle{empty}% + \vspace*{25mm}{\exhyphenpenalty=10000\hyphenpenalty=10000 + %\Large\raggedright\noindent\bf#1\par} +\fontsize{18bp}{24bp}\selectfont\bf\noindent\raggedright#1\par}} +%AUTHOR +\renewcommand{\author}{\@ifnextchar[{\@sauthor}{\@fauthor}} +\def\@sauthor[#1]#2{\markright{#1} % for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#2 + \end{indented}% + \smallskip} +\def\@fauthor#1{%\markright{#1} for production only + \vspace*{1.5pc}% + \begin{indented}% + \item[]\normalsize\bf\raggedright#1 + \end{indented}% + \smallskip} +%E-MAIL +\def\eads#1{\vspace*{5pt}\address{E-mail: #1}} +\def\ead#1{\vspace*{5pt}\address{E-mail: \mailto{#1}}} +\def\mailto#1{{\tt #1}} +%ADDRESS +\newcommand{\address}[1]{\begin{indented} + \item[]\rm\raggedright #1 + \end{indented}} +\newlength{\indentedwidth} +\newdimen\mathindent +\mathindent = 6pc +\indentedwidth=\mathindent +% FOOTNOTES +%\renewcommand\footnoterule{% +% \kern-3\p@ +% 
\hrule\@width.4\columnwidth +% \kern2.6\p@} +%\newcommand\@makefntext[1]{% +% \parindent 1em% +% \noindent +% \hb@xt@1.8em{\hss\@makefnmark}#1} +% Footnotes: symbols selected in same order as address indicators +% unless optional argument of [<num>] use to specify required symbol, +% 1=\dag, 2=\ddag, etc +% Usage: \footnote{Text of footnote} +% \footnote[3]{Text of footnote} +% +\def\footnoterule{}% +\setcounter{footnote}{0} +\long\def\@makefntext#1{\parindent 1em\noindent + \makebox[1em][l]{\footnotesize\rm$\m@th{\fnsymbol{footnote}}$}% + \footnotesize\rm #1} +\def\@makefnmark{\normalfnmark} +\def\normalfnmark{\hbox{${\fnsymbol{footnote}}\m@th$}} +\def\altfnmark{\hbox{$^{\rm Note}\ {\fnsymbol{footnote}}\m@th$}} +\def\footNote#1{\let\@makefnmark\altfnmark\footnote{#1}\let\@makefnmark\normalfnmark} +\def\@thefnmark{\fnsymbol{footnote}} +\def\footnote{\protect\pfootnote} +\def\pfootnote{\@ifnextchar[{\@xfootnote}{\stepcounter{\@mpfn}% + \begingroup\let\protect\noexpand + \xdef\@thefnmark{\thempfn}\endgroup + \@footnotemark\@footnotetext}} +\def\@xfootnote[#1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} + +\newcommand\ftnote{\protect\pftnote} +\newcommand\pftnote[1]{\setcounter{footnote}{#1}% + \addtocounter{footnote}{-1}\footnote} +\newcommand{\fnm}[1]{\setcounter{footnote}{#1}\footnotetext} + +\def\@fnsymbol#1{\ifnum\thefootnote=99\hbox{*}\else^{\thefootnote}\fi\relax} +% +% Address marker +% +\newcommand{\ad}[1]{\noindent\hbox{$^{#1}$}\relax} +\newcommand{\adnote}[2]{\noindent\hbox{$^{#1,}$}\setcounter{footnote}{#2}% + \addtocounter{footnote}{-1}\footnote} +\def\@tnote{} +\newcounter{oldftnote} +\newcommand{\tnote}[1]{*\gdef\@tnote{% + \setcounter{oldftnote}{\c@footnote}% + \setcounter{footnote}{99}% + \footnotetext{#1}% + \setcounter{footnote}{\c@oldftnote}\addtocounter{footnote}{-1}}} +%================== +% Acknowledgments (no heading if letter) +% Usage \ack for Acknowledgments, \ackn for Acknowledgement 
+\def\ack{\section*{Acknowledgments}} +\def\ackn{\section*{Acknowledgment}} +%SECTION DEFINITIONS +\setcounter{secnumdepth}{3} +\newcounter {section} +\newcounter {subsection}[section] +\newcounter {subsubsection}[subsection] +\newcounter {paragraph}[subsubsection] +\newcounter {subparagraph}[paragraph] +\renewcommand \thesection {\arabic{section}} +\renewcommand\thesubsection {\thesection.\arabic{subsection}} +\renewcommand\thesubsubsection{\thesubsection .\arabic{subsubsection}} +\renewcommand\theparagraph {\thesubsubsection.\arabic{paragraph}} +\renewcommand\thesubparagraph {\theparagraph.\arabic{subparagraph}} +%\nosections +\def\nosections{\vspace{30\p@ plus12\p@ minus12\p@} + \noindent\ignorespaces} + +%\renewcommand{\@startsection}[6] +%{% +%\if@noskipsec \leavevmode \fi +%\par +% \@tempskipa #4\relax +%%\@tempskipa 0pt\relax +% \@afterindenttrue +% \ifdim \@tempskipa <\z@ +% \@tempskipa -\@tempskipa \@afterindentfalse +% \fi +% \if@nobreak +% \everypar{}% +% \else +% \addpenalty\@secpenalty\addvspace\@tempskipa +% \fi +% \@ifstar +% {\@ssect{#3}{#4}{#5}{#6}}% +% {\@dblarg{\@sect{#1}{#2}{#3}{#4}{#5}{#6}}}} +%\renewcommand{\@sect}[8]{% +% \ifnum #2>\c@secnumdepth +% \let\@svsec\@empty +% \else +% \refstepcounter{#1}% +% \protected@edef\@svsec{\@seccntformat{#1}\relax}% +% \fi +% \@tempskipa #5\relax +% \ifdim \@tempskipa>\z@ +% \begingroup +% #6{% +% \@hangfrom{\hskip #3\relax\@svsec}% +% \interlinepenalty \@M #8\@@par}% +% \endgroup +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}% +% \else +% \def\@svsechd{% +% #6{\hskip #3\relax +% \@svsec #8}% +% \csname #1mark\endcsname{#7}% +% \addcontentsline{toc}{#1}{% +% \ifnum #2>\c@secnumdepth \else +% \protect\numberline{\csname the#1\endcsname}% +% \fi +% #7}}% +% \fi +% \@xsect{#5}} +%\renewcommand{\@xsect}[1]{% +% \@tempskipa #1\relax +% \ifdim \@tempskipa>\z@ +% \par \nobreak +% \vskip \@tempskipa +% 
\@afterheading +% \else +% \@nobreakfalse +% \global\@noskipsectrue +% \everypar{% +% \if@noskipsec +% \global\@noskipsecfalse +% {\setbox\z@\lastbox}% +% \clubpenalty\@M +% \begingroup \@svsechd \endgroup +% \unskip +% \@tempskipa #1\relax +% \hskip -\@tempskipa +% \else +% \clubpenalty \@clubpenalty +% \everypar{}% +% \fi}% +% \fi +% \ignorespaces} +%======================================================================== +\newcommand\section{\@startsection {section}{1}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\bfseries\raggedright}} +\newcommand\subsection{\@startsection{subsection}{2}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {1sp}% + {\reset@font\normalsize\itshape\raggedright}} +\newcommand\subsubsection{\@startsection{subsubsection}{3}{\z@}% + {-3.25ex\@plus -1ex \@minus -.2ex}% + {-1em \@plus .2em}% + {\reset@font\normalsize\itshape}} +\newcommand\paragraph{\@startsection{paragraph}{4}{\z@}% + {3.25ex \@plus1ex \@minus.2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\newcommand\subparagraph{\@startsection{subparagraph}{5}{\parindent}% + {3.25ex \@plus1ex \@minus .2ex}% + {-1em}% + {\reset@font\normalsize\itshape}} +\def\@sect#1#2#3#4#5#6[#7]#8{\ifnum #2>\c@secnumdepth + \let\@svsec\@empty\else + \refstepcounter{#1}\edef\@svsec{\csname the#1\endcsname. 
}\fi + \@tempskipa #5\relax + \ifdim \@tempskipa>\z@ + \begingroup #6\relax + \noindent{\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\par}% + \endgroup + \csname #1mark\endcsname{#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}\else + \def\@svsechd{#6\hskip #3\relax %% \relax added 2 May 90 + \@svsec #8\csname #1mark\endcsname + {#7}\addcontentsline + {toc}{#1}{\ifnum #2>\c@secnumdepth \else + \protect\numberline{\csname the#1\endcsname}\fi + #7}}\fi + \@xsect{#5}} +% +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + \begingroup #4\noindent{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + \else \def\@svsechd{#4\hskip #1\relax #5}\fi + \@xsect{#3}} +% LIST DEFINITIONS +\setlength\leftmargini {2em} +\leftmargin \leftmargini +\setlength\leftmarginii {2em} +\setlength\leftmarginiii {1.8em} +\setlength\leftmarginiv {1.6em} + \setlength\leftmarginv {1em} + \setlength\leftmarginvi {1em} +\setlength\leftmargin{\leftmargini} +\setlength \labelsep {.5em} +\setlength \labelwidth{\leftmargini} +\addtolength\labelwidth{-\labelsep} +\@beginparpenalty -\@lowpenalty +\@endparpenalty -\@lowpenalty +\@itempenalty -\@lowpenalty +\renewcommand\theenumi{\roman{enumi}} +\renewcommand\theenumii{\alph{enumii}} +\renewcommand\theenumiii{\arabic{enumiii}} +\renewcommand\theenumiv{\Alph{enumiv}} +\newcommand\labelenumi{(\theenumi)} +\newcommand\labelenumii{(\theenumii)} +\newcommand\labelenumiii{\theenumiii.} +\newcommand\labelenumiv{(\theenumiv)} +\renewcommand\p@enumii{(\theenumi)} +\renewcommand\p@enumiii{(\theenumi.\theenumii)} +\renewcommand\p@enumiv{(\theenumi.\theenumii.\theenumiii)} +\newcommand\labelitemi{$\m@th\bullet$} +\newcommand\labelitemii{\normalfont\bfseries --} +\newcommand\labelitemiii{$\m@th\ast$} +\newcommand\labelitemiv{$\m@th\cdot$} +\renewcommand \theequation {\@arabic\c@equation} + +%%%%%%%%%%%%% Figures +\newcounter{figure} +\renewcommand\thefigure{\@arabic\c@figure} 
+\def\fps@figure{tbp} +\def\ftype@figure{1} +\def\ext@figure{lof} +\def\fnum@figure{\figurename~\thefigure} +\newenvironment{figure}{\footnotesize\rm\@float{figure}}% + {\end@float\normalsize\rm} +\newenvironment{figure*}{\footnotesize\rm\@dblfloat{figure}}{\end@dblfloat} +\newcounter{table} +\renewcommand\thetable{\@arabic\c@table} +\def\fps@table{tbp} +\def\ftype@table{2} +\def\ext@table{lot} +\def\fnum@table{\tablename~\thetable} +\newenvironment{table}{\footnotesize\rm\@float{table}}% + {\end@float\normalsize\rm} +\newenvironment{table*}{\footnotesize\rm\@dblfloat{table}}% + {\end@dblfloat\normalsize\rm} +\newlength\abovecaptionskip +\newlength\belowcaptionskip +\setlength\abovecaptionskip{10\p@} +\setlength\belowcaptionskip{0\p@} +%Table Environments +%\newenvironment{tableref}[3][\textwidth]{% +%\begin{center}% +%\begin{table}% +%\captionsetup[table]{width=#1} +%\centering\caption{\label{#2}#3}}{\end{table}\end{center}} +%%%%%%%%%%%%%%%%% +%\newcounter{figure} +%\renewcommand \thefigure {\@arabic\c@figure} +%\def\fps@figure{tbp} +%\def\ftype@figure{1} +%\def\ext@figure{lof} +%\def\fnum@figure{\figurename~\thefigure} +%ENVIRONMENT: figure +%\newenvironment{figure} +% {\@float{figure}} +% {\end@float} +%ENVIRONMENT: figure* +%\newenvironment{figure*} +% {\@dblfloat{figure}} +% {\end@dblfloat} +%ENVIRONMENT: table +%\newcounter{table} +%\renewcommand\thetable{\@arabic\c@table} +%\def\fps@table{tbp} +%\def\ftype@table{2} +%\def\ext@table{lot} +%\def\fnum@table{\tablename~\thetable} +%\newenvironment{table} +% {\@float{table}} +% {\end@float} +%ENVIRONMENT: table* +%\newenvironment{table*} +% {\@dblfloat{table}} +% {\end@dblfloat} +%\newlength\abovecaptionskip +%\newlength\belowcaptionskip +%\setlength\abovecaptionskip{10\p@} +%\setlength\belowcaptionskip{0\p@} +% CAPTIONS +% Added redefinition of \@caption so captions are not written to +% aux file therefore less need to \protect fragile commands +% +\long\def\@caption#1[#2]#3{\par\begingroup + \@parboxrestore + 
\normalsize + \@makecaption{\csname fnum@#1\endcsname}{\ignorespaces #3}\par + \endgroup} +\long\def\@makecaption#1#2{% + \vskip\abovecaptionskip + \sbox\@tempboxa{{\bf #1.} #2}% + \ifdim \wd\@tempboxa >\hsize + {\bf #1.} #2\par + \else + \global \@minipagefalse + \hb@xt@\hsize{\hfil\box\@tempboxa\hfil}% + \fi + \vskip\belowcaptionskip} +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} +%\newcommand\@pnumwidth{1.55em} +%\newcommand\@tocrmarg{2.55em} +%\newcommand\@dotsep{4.5} +%\setcounter{tocdepth}{3} +%\newcommand\tableofcontents{% +% \section*{\contentsname +% \@mkboth{% +% \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}% +% \@starttoc{toc}% +% } +%\newcommand*\l@part[2]{% +% \ifnum \c@tocdepth >-2\relax +% \addpenalty\@secpenalty +% \addvspace{2.25em \@plus\p@}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% {\leavevmode +% \large \bfseries #1\hfil \hb@xt@\@pnumwidth{\hss #2}}\par +% \nobreak +% \if@compatibility +% \global\@nobreaktrue +% \everypar{\global\@nobreakfalse\everypar{}}% +% \fi +% \endgroup +% \fi} +%\newcommand*\l@section[2]{% +% \ifnum \c@tocdepth >\z@ +% \addpenalty\@secpenalty +% \addvspace{1.0em \@plus\p@}% +% \setlength\@tempdima{1.5em}% +% \begingroup +% \parindent \z@ \rightskip \@pnumwidth +% \parfillskip -\@pnumwidth +% \leavevmode \bfseries +% \advance\leftskip\@tempdima +% \hskip -\leftskip +% #1\nobreak\hfil \nobreak\hb@xt@\@pnumwidth{\hss #2}\par +% \endgroup +% \fi} 
+%\newcommand*\l@subsection{\@dottedtocline{2}{1.5em}{2.3em}} +%\newcommand*\l@subsubsection{\@dottedtocline{3}{3.8em}{3.2em}} +%\newcommand*\l@paragraph{\@dottedtocline{4}{7.0em}{4.1em}} +%\newcommand*\l@subparagraph{\@dottedtocline{5}{10em}{5em}} +%\newcommand\listoffigures{% +% \section*{\listfigurename +% \@mkboth{\MakeUppercase\listfigurename}% +% {\MakeUppercase\listfigurename}}% +% \@starttoc{lof}% +% } +%\newcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}} +%\newcommand\listoftables{% +% \section*{\listtablename +% \@mkboth{% +% \MakeUppercase\listtablename}{\MakeUppercase\listtablename}}% +% \@starttoc{lot}% +% } +%\let\l@table\l@figure +%====================================== +%ENVIRONMENTS +%====================================== +%ENVIRONMENT: indented +\newenvironment{indented}{\begin{indented}}{\end{indented}} +\newenvironment{varindent}[1]{\begin{varindent}{#1}}{\end{varindent}} +% +\def\indented{\list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\mathindent\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endindented=\endlist +\def\varindent#1{\setlength{\varind}{#1}% + \list{}{\itemsep=0\p@\labelsep=0\p@\itemindent=0\p@ + \labelwidth=0\p@\leftmargin=\varind\topsep=0\p@\partopsep=0\p@ + \parsep=0\p@\listparindent=15\p@}\footnotesize\rm} +\let\endvarindent=\endlist +%ENVIRONMENT: abstract +\newenvironment{abstract}{% + \vspace{16pt plus3pt minus3pt} + \begin{indented} + \item[]{\bfseries \abstractname.}\quad\rm\ignorespaces} + {\end{indented}\vspace{10mm}} +%ENVIRONMENT: description +\newenvironment{description} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} +%ENVIRONMENT: quotation +\newenvironment{quotation} + {\list{}{\listparindent 1.5em% + \itemindent \listparindent + \rightmargin \leftmargin + \parsep \z@ \@plus\p@}% + \item[]} + {\endlist} 
+%ENVIRONMENT: quote +\newenvironment{quote} + {\list{}{\rightmargin\leftmargin}% + \item[]} + {\endlist} +%ENVIRONMENT: verse +\newenvironment{verse} + {\let\\=\@centercr + \list{}{\itemsep \z@ + \itemindent -1.5em% + \listparindent\itemindent + \rightmargin \leftmargin + \advance\leftmargin 1.5em}% + \item[]} + {\endlist} +%ENVIRONMENT: bibliography +\newdimen\bibindent +\setlength\bibindent{1.5em} +\def\thebibliography#1{\list + {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@ + \labelsep=5\p@\itemindent=-10\p@ + \settowidth\labelwidth{\footnotesize[#1]}% + \leftmargin\labelwidth + \advance\leftmargin\labelsep + \advance\leftmargin -\itemindent + \usecounter{enumi}}\footnotesize + \def\newblock{\ } + \sloppy\clubpenalty4000\widowpenalty4000 + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist +\def\numrefs#1{\begin{thebibliography}{#1}} +\def\endnumrefs{\end{thebibliography}} +\let\endbib=\endnumrefs +%%%%%%%%%%%%%%%%%% + + +%\newenvironment{thebibliography}[1] +% {\section*{References} +% \list{\@biblabel{\@arabic\c@enumiv}}% +% {\settowidth\labelwidth{\@biblabel{#1}}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \@openbib@code +% \usecounter{enumiv}% +% \let\p@enumiv\@empty +% \renewcommand\theenumiv{\@arabic\c@enumiv}}% +% \sloppy +% \clubpenalty4000 +% \@clubpenalty \clubpenalty +% \widowpenalty4000% +% \sfcode`\.\@m} +% {\def\@noitemerr +% {\@latex@warning{Empty `thebibliography' environment}}% +% \endlist} +%\newcommand\newblock{\hskip .11em\@plus.33em\@minus.07em} +%\let\@openbib@code\@empty +%ENVIRONMENT: theindex +\newenvironment{theindex} + {\if@twocolumn + \@restonecolfalse + \else + \@restonecoltrue + \fi + \columnseprule \z@ + \columnsep 35\p@ + \twocolumn[\section*{\indexname}]% + \@mkboth{\MakeUppercase\indexname}% + {\MakeUppercase\indexname}% + \thispagestyle{plain}\parindent\z@ + \parskip\z@ \@plus .3\p@\relax + \let\item\@idxitem} + {\if@restonecol\onecolumn\else\clearpage\fi} 
+\newcommand\@idxitem{\par\hangindent 40\p@} +\newcommand\subitem{\@idxitem \hspace*{20\p@}} +\newcommand\subsubitem{\@idxitem \hspace*{30\p@}} +\newcommand\indexspace{\par \vskip 10\p@ \@plus5\p@ \@minus3\p@\relax} +%===================== +\def\appendix{\@ifnextchar*{\@appendixstar}{\@appendix}} +\def\@appendix{\eqnobysec\@appendixstar} +\def\@appendixstar{\@@par + \ifnumbysec % Added 30/4/94 to get Table A1, + \@addtoreset{table}{section} % Table B1 etc if numbering by + \@addtoreset{figure}{section}\fi % section + \setcounter{section}{0} + \setcounter{subsection}{0} + \setcounter{subsubsection}{0} + \setcounter{equation}{0} + \setcounter{figure}{0} + \setcounter{table}{0} + \def\thesection{Appendix \Alph{section}} + \def\theequation{\ifnumbysec + \Alph{section}.\arabic{equation}\else + \Alph{section}\arabic{equation}\fi} % Comment A\arabic{equation} maybe + \def\thetable{\ifnumbysec % better? 15/4/95 + \Alph{section}\arabic{table}\else + A\arabic{table}\fi} + \def\thefigure{\ifnumbysec + \Alph{section}\arabic{figure}\else + A\arabic{figure}\fi}} +\def\noappendix{\setcounter{figure}{0} + \setcounter{table}{0} + \def\thetable{\arabic{table}} + \def\thefigure{\arabic{figure}}} +\setlength\arraycolsep{5\p@} +\setlength\tabcolsep{6\p@} +\setlength\arrayrulewidth{.4\p@} +\setlength\doublerulesep{2\p@} +\setlength\tabbingsep{\labelsep} +\skip\@mpfootins = \skip\footins +\setlength\fboxsep{3\p@} +\setlength\fboxrule{.4\p@} +\renewcommand\theequation{\arabic{equation}} +% NAME OF STRUCTURES +\newcommand\contentsname{Contents} +\newcommand\listfigurename{List of Figures} +\newcommand\listtablename{List of Tables} +\newcommand\refname{References} +\newcommand\indexname{Index} +\newcommand\figurename{Figure} +\newcommand\tablename{Table} +\newcommand\partname{Part} +\newcommand\appendixname{Appendix} +\newcommand\abstractname{Abstract} +%Miscellaneous commands +\newcommand{\BibTeX}{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em + 
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} +\newcommand{\jpcsit}{{\bfseries\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\jpcs}{{\itshape\selectfont Journal of Physics: Conference Series}} +\newcommand{\iopp}{IOP Publishing} +\newcommand{\cls}{{\upshape\selectfont\texttt{jpconf.cls}}} +\newcommand{\corg}{conference organizer} +\newcommand\today{\number\day\space\ifcase\month\or + January\or February\or March\or April\or May\or June\or + July\or August\or September\or October\or November\or December\fi + \space\number\year} + \setlength\columnsep{10\p@} +\setlength\columnseprule{0\p@} +\newcommand{\Tables}{\clearpage\section*{Tables and table captions} +\def\fps@table{hp}\noappendix} +\newcommand{\Figures}{\clearpage\section*{Figure captions} + \def\fps@figure{hp}\noappendix} +% +\newcommand{\Figure}[1]{\begin{figure} + \caption{#1} + \end{figure}} +% +\newcommand{\Table}[1]{\begin{table} + \caption{#1} + \begin{indented} + \lineup + \item[]\begin{tabular}{@{}l*{15}{l}}} +\def\endTable{\end{tabular}\end{indented}\end{table}} +\let\endtab=\endTable +% +\newcommand{\fulltable}[1]{\begin{table} + \caption{#1} + \lineup + \begin{tabular*}{\textwidth}{@{}l*{15}{@{\extracolsep{0pt plus 12pt}}l}}} +\def\endfulltable{\end{tabular*}\end{table}} +%BIBLIOGRAPHY and References +%\newcommand{\Bibliography}[1]{\section*{References}\par\numrefs{#1}} +%\newcommand{\References}{\section*{References}\par\refs} +%\def\thebibliography#1{\list +% {\hfil[\arabic{enumi}]}{\topsep=0\p@\parsep=0\p@ +% \partopsep=0\p@\itemsep=0\p@ +% \labelsep=5\p@\itemindent=-10\p@ +% \settowidth\labelwidth{\footnotesize[#1]}% +% \leftmargin\labelwidth +% \advance\leftmargin\labelsep +% \advance\leftmargin -\itemindent +% \usecounter{enumi}}\footnotesize +% \def\newblock{\ } +% \sloppy\clubpenalty4000\widowpenalty4000 +% \sfcode`\.=1000\relax} +%\let\endthebibliography=\endlist +%\def\numrefs#1{\begin{thebibliography}{#1}} +%\def\endnumrefs{\end{thebibliography}} 
+%\let\endbib=\endnumrefs + +\def\thereferences{\list{}{\topsep=0\p@\parsep=0\p@ + \partopsep=0\p@\itemsep=0\p@\labelsep=0\p@\itemindent=-18\p@ +\labelwidth=0\p@\leftmargin=18\p@ +}\footnotesize\rm +\def\newblock{\ } +\sloppy\clubpenalty4000\widowpenalty4000 +\sfcode`\.=1000\relax}% +\let\endthereferences=\endlist +% MISC EQUATRION STUFF +%\def\[{\relax\ifmmode\@badmath\else +% \begin{trivlist} +% \@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +% \item[]\leavevmode +% \hbox to\linewidth\bgroup$ \displaystyle +% \hskip\mathindent\bgroup\fi} +%\def\]{\relax\ifmmode \egroup $\hfil \egroup \end{trivlist}\else \@badmath \fi} +%\def\equation{\@beginparpenalty\predisplaypenalty +% \@endparpenalty\postdisplaypenalty +%\refstepcounter{equation}\trivlist \item[]\leavevmode +% \hbox to\linewidth\bgroup $ \displaystyle +%\hskip\mathindent} +%\def\endequation{$\hfil \displaywidth\linewidth\@eqnnum\egroup \endtrivlist} +%\@namedef{equation*}{\[} +%\@namedef{endequation*}{\]} +%\def\eqnarray{\stepcounter{equation}\let\@currentlabel=\theequation +%\global\@eqnswtrue +%\global\@eqcnt\z@\tabskip\mathindent\let\\=\@eqncr +%\abovedisplayskip\topsep\ifvmode\advance\abovedisplayskip\partopsep\fi +%\belowdisplayskip\abovedisplayskip +%\belowdisplayshortskip\abovedisplayskip +%\abovedisplayshortskip\abovedisplayskip +%$$\halign to +%\linewidth\bgroup\@eqnsel$\displaystyle\tabskip\z@ +% {##{}}$&\global\@eqcnt\@ne $\displaystyle{{}##{}}$\hfil +% &\global\@eqcnt\tw@ $\displaystyle{{}##}$\hfil +% \tabskip\@centering&\llap{##}\tabskip\z@\cr} +%\def\endeqnarray{\@@eqncr\egroup +% \global\advance\c@equation\m@ne$$\global\@ignoretrue } +%\mathindent = 6pc +%% +%\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th +% \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil +% \crcr#1\crcr}}\,} +%% +%\def\eqalignno#1{\displ@y \tabskip\z@skip +% \halign to\displaywidth{\hspace{5pc}$\@lign\displaystyle{##}$% +% \tabskip\z@skip +% 
&$\@lign\displaystyle{{}##}$\hfill\tabskip\@centering +% &\llap{$\@lign\hbox{\rm##}$}\tabskip\z@skip\crcr +% #1\crcr}} +%% +\newif\ifnumbysec +\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi} +\def\eqnobysec{\numbysectrue\@addtoreset{equation}{section}} +\newcounter{eqnval} +\def\numparts{\addtocounter{equation}{1}% + \setcounter{eqnval}{\value{equation}}% + \setcounter{equation}{0}% + \def\theequation{\ifnumbysec + \arabic{section}.\arabic{eqnval}{\it\alph{equation}}% + \else\arabic{eqnval}{\it\alph{equation}}\fi}} +\def\endnumparts{\def\theequation{\ifnumbysec + \arabic{section}.\arabic{equation}\else + \arabic{equation}\fi}% + \setcounter{equation}{\value{eqnval}}} +% +\def\cases#1{% + \left\{\,\vcenter{\def\\{\cr}\normalbaselines\openup1\jot\m@th% + \ialign{\strut$\displaystyle{##}\hfil$&\tqs + \rm##\hfil\crcr#1\crcr}}\right.}% +\def\eqalign#1{\null\vcenter{\def\\{\cr}\openup\jot\m@th + \ialign{\strut$\displaystyle{##}$\hfil&$\displaystyle{{}##}$\hfil + \crcr#1\crcr}}\,} +% OTHER USEFUL BITS +\newcommand{\e}{\mathrm{e}} +\newcommand{\rme}{\mathrm{e}} +\newcommand{\rmi}{\mathrm{i}} +\newcommand{\rmd}{\mathrm{d}} +\renewcommand{\qquad}{\hspace*{25pt}} +\newcommand{\tdot}[1]{\stackrel{\dots}{#1}} % Added 1/9/94 +\newcommand{\tqs}{\hspace*{25pt}} +\newcommand{\fl}{\hspace*{-\mathindent}} +\newcommand{\Tr}{\mathop{\mathrm{Tr}}\nolimits} +\newcommand{\tr}{\mathop{\mathrm{tr}}\nolimits} +\newcommand{\Or}{\mathord{\mathrm{O}}} %changed from \mathop 20/1/95 +\newcommand{\lshad}{[\![} +\newcommand{\rshad}{]\!]} +\newcommand{\case}[2]{{\textstyle\frac{#1}{#2}}} +\def\pt(#1){({\it #1\/})} +\newcommand{\dsty}{\displaystyle} +\newcommand{\tsty}{\textstyle} +\newcommand{\ssty}{\scriptstyle} +\newcommand{\sssty}{\scriptscriptstyle} +\def\lo#1{\llap{${}#1{}$}} +\def\eql{\llap{${}={}$}} +\def\lsim{\llap{${}\sim{}$}} +\def\lsimeq{\llap{${}\simeq{}$}} +\def\lequiv{\llap{${}\equiv{}$}} +% +\newcommand{\eref}[1]{(\ref{#1})} 
+%\newcommand{\eqref}[1]{Equation (\ref{#1})} +%\newcommand{\Eqref}[1]{Equation (\ref{#1})} +\newcommand{\sref}[1]{section~\ref{#1}} +\newcommand{\fref}[1]{figure~\ref{#1}} +\newcommand{\tref}[1]{table~\ref{#1}} +\newcommand{\Sref}[1]{Section~\ref{#1}} +\newcommand{\Fref}[1]{Figure~\ref{#1}} +\newcommand{\Tref}[1]{Table~\ref{#1}} +\newcommand{\opencircle}{\mbox{\Large$\circ\,$}} % moved Large outside maths +\newcommand{\opensquare}{\mbox{$\rlap{$\sqcap$}\sqcup$}} +\newcommand{\opentriangle}{\mbox{$\triangle$}} +\newcommand{\opentriangledown}{\mbox{$\bigtriangledown$}} +\newcommand{\opendiamond}{\mbox{$\diamondsuit$}} +\newcommand{\fullcircle}{\mbox{{\Large$\bullet\,$}}} % moved Large outside maths +\newcommand{\fullsquare}{\,\vrule height5pt depth0pt width5pt} +\newcommand{\dotted}{\protect\mbox{${\mathinner{\cdotp\cdotp\cdotp\cdotp\cdotp\cdotp}}$}} +\newcommand{\dashed}{\protect\mbox{-\; -\; -\; -}} +\newcommand{\broken}{\protect\mbox{-- -- --}} +\newcommand{\longbroken}{\protect\mbox{--- --- ---}} +\newcommand{\chain}{\protect\mbox{--- $\cdot$ ---}} +\newcommand{\dashddot}{\protect\mbox{--- $\cdot$ $\cdot$ ---}} +\newcommand{\full}{\protect\mbox{------}} + +\def\;{\protect\psemicolon} +\def\psemicolon{\relax\ifmmode\mskip\thickmuskip\else\kern .3333em\fi} +\def\lineup{\def\0{\hbox{\phantom{0}}}% + \def\m{\hbox{$\phantom{-}$}}% + \def\-{\llap{$-$}}} +% +%%%%%%%%%%%%%%%%%%%%% +% Tables rules % +%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\boldarrayrulewidth}{1\p@} +% Width of bold rule in tabular environment. 
+ +\def\bhline{\noalign{\ifnum0=`}\fi\hrule \@height +\boldarrayrulewidth \futurelet \@tempa\@xhline} + +\def\@xhline{\ifx\@tempa\hline\vskip \doublerulesep\fi + \ifnum0=`{\fi}} + +% +% Rules for tables with extra space around +% +\newcommand{\br}{\ms\bhline\ms} +\newcommand{\mr}{\ms\hline\ms} +% +\newcommand{\centre}[2]{\multispan{#1}{\hfill #2\hfill}} +\newcommand{\crule}[1]{\multispan{#1}{\hspace*{\tabcolsep}\hrulefill + \hspace*{\tabcolsep}}} +\newcommand{\fcrule}[1]{\ifnum\thetabtype=1\multispan{#1}{\hrulefill + \hspace*{\tabcolsep}}\else\multispan{#1}{\hrulefill}\fi} +% +% Extra spaces for tables and displayed equations +% +\newcommand{\ms}{\noalign{\vspace{3\p@ plus2\p@ minus1\p@}}} +\newcommand{\bs}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}} +\newcommand{\ns}{\noalign{\vspace{-3\p@ plus-1\p@ minus-1\p@}}} +\newcommand{\es}{\noalign{\vspace{6\p@ plus2\p@ minus2\p@}}\displaystyle}% +% +\newcommand{\etal}{{\it et al\/}\ } +\newcommand{\dash}{------} +\newcommand{\nonum}{\par\item[]} %\par added 1/9/93 +\newcommand{\mat}[1]{\underline{\underline{#1}}} +% +% abbreviations for IOPP journals +% +\newcommand{\CQG}{{\it Class. Quantum Grav.} } +\newcommand{\CTM}{{\it Combust. Theory Modelling\/} } +\newcommand{\DSE}{{\it Distrib. Syst. Engng\/} } +\newcommand{\EJP}{{\it Eur. J. Phys.} } +\newcommand{\HPP}{{\it High Perform. Polym.} } % added 4/5/93 +\newcommand{\IP}{{\it Inverse Problems\/} } +\newcommand{\JHM}{{\it J. Hard Mater.} } % added 4/5/93 +\newcommand{\JO}{{\it J. Opt.} } +\newcommand{\JOA}{{\it J. Opt. A: Pure Appl. Opt.} } +\newcommand{\JOB}{{\it J. Opt. B: Quantum Semiclass. Opt.} } +\newcommand{\JPA}{{\it J. Phys. A: Math. Gen.} } +\newcommand{\JPB}{{\it J. Phys. B: At. Mol. Phys.} } %1968-87 +\newcommand{\jpb}{{\it J. Phys. B: At. Mol. Opt. Phys.} } %1988 and onwards +\newcommand{\JPC}{{\it J. Phys. C: Solid State Phys.} } %1968--1988 +\newcommand{\JPCM}{{\it J. Phys.: Condens. Matter\/} } %1989 and onwards +\newcommand{\JPD}{{\it J. Phys. D: Appl. 
Phys.} } +\newcommand{\JPE}{{\it J. Phys. E: Sci. Instrum.} } +\newcommand{\JPF}{{\it J. Phys. F: Met. Phys.} } +\newcommand{\JPG}{{\it J. Phys. G: Nucl. Phys.} } %1975--1988 +\newcommand{\jpg}{{\it J. Phys. G: Nucl. Part. Phys.} } %1989 and onwards +\newcommand{\MSMSE}{{\it Modelling Simulation Mater. Sci. Eng.} } +\newcommand{\MST}{{\it Meas. Sci. Technol.} } %1990 and onwards +\newcommand{\NET}{{\it Network: Comput. Neural Syst.} } +\newcommand{\NJP}{{\it New J. Phys.} } +\newcommand{\NL}{{\it Nonlinearity\/} } +\newcommand{\NT}{{\it Nanotechnology} } +\newcommand{\PAO}{{\it Pure Appl. Optics\/} } +\newcommand{\PM}{{\it Physiol. Meas.} } % added 4/5/93 +\newcommand{\PMB}{{\it Phys. Med. Biol.} } +\newcommand{\PPCF}{{\it Plasma Phys. Control. Fusion\/} } % added 4/5/93 +\newcommand{\PSST}{{\it Plasma Sources Sci. Technol.} } +\newcommand{\PUS}{{\it Public Understand. Sci.} } +\newcommand{\QO}{{\it Quantum Opt.} } +\newcommand{\QSO}{{\em Quantum Semiclass. Opt.} } +\newcommand{\RPP}{{\it Rep. Prog. Phys.} } +\newcommand{\SLC}{{\it Sov. Lightwave Commun.} } % added 4/5/93 +\newcommand{\SST}{{\it Semicond. Sci. Technol.} } +\newcommand{\SUST}{{\it Supercond. Sci. Technol.} } +\newcommand{\WRM}{{\it Waves Random Media\/} } +\newcommand{\JMM}{{\it J. Micromech. Microeng.\/} } +% +% Other commonly quoted journals +% +\newcommand{\AC}{{\it Acta Crystallogr.} } +\newcommand{\AM}{{\it Acta Metall.} } +\newcommand{\AP}{{\it Ann. Phys., Lpz.} } +\newcommand{\APNY}{{\it Ann. Phys., NY\/} } +\newcommand{\APP}{{\it Ann. Phys., Paris\/} } +\newcommand{\CJP}{{\it Can. J. Phys.} } +\newcommand{\JAP}{{\it J. Appl. Phys.} } +\newcommand{\JCP}{{\it J. Chem. Phys.} } +\newcommand{\JJAP}{{\it Japan. J. Appl. Phys.} } +\newcommand{\JP}{{\it J. Physique\/} } +\newcommand{\JPhCh}{{\it J. Phys. Chem.} } +\newcommand{\JMMM}{{\it J. Magn. Magn. Mater.} } +\newcommand{\JMP}{{\it J. Math. Phys.} } +\newcommand{\JOSA}{{\it J. Opt. Soc. Am.} } +\newcommand{\JPSJ}{{\it J. Phys. Soc. 
Japan\/} } +\newcommand{\JQSRT}{{\it J. Quant. Spectrosc. Radiat. Transfer\/} } +\newcommand{\NC}{{\it Nuovo Cimento\/} } +\newcommand{\NIM}{{\it Nucl. Instrum. Methods\/} } +\newcommand{\NP}{{\it Nucl. Phys.} } +\newcommand{\PL}{{\it Phys. Lett.} } +\newcommand{\PR}{{\it Phys. Rev.} } +\newcommand{\PRL}{{\it Phys. Rev. Lett.} } +\newcommand{\PRS}{{\it Proc. R. Soc.} } +\newcommand{\PS}{{\it Phys. Scr.} } +\newcommand{\PSS}{{\it Phys. Status Solidi\/} } +\newcommand{\PTRS}{{\it Phil. Trans. R. Soc.} } +\newcommand{\RMP}{{\it Rev. Mod. Phys.} } +\newcommand{\RSI}{{\it Rev. Sci. Instrum.} } +\newcommand{\SSC}{{\it Solid State Commun.} } +\newcommand{\ZP}{{\it Z. Phys.} } +%=================== +\pagestyle{headings} +\pagenumbering{arabic} +\raggedbottom +\onecolumn +\endinput +%% +%% End of file `jconf.cls'. diff --git a/contributions/sd_storm2/jpconf11.clo b/contributions/sd_storm2/jpconf11.clo new file mode 100644 index 0000000000000000000000000000000000000000..63541cbb98638b86bbc1df2d09f4eafbe3233a42 --- /dev/null +++ b/contributions/sd_storm2/jpconf11.clo @@ -0,0 +1,141 @@ +%% +%% This is file `jpconf11.clo' +%% +%% This file is distributed in the hope that it will be useful, +%% but WITHOUT ANY WARRANTY; without even the implied warranty of +%% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +%% +%% \CharacterTable +%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z +%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z +%% Digits \0\1\2\3\4\5\6\7\8\9 +%% Exclamation \! Double quote \" Hash (number) \# +%% Dollar \$ Percent \% Ampersand \& +%% Acute accent \' Left paren \( Right paren \) +%% Asterisk \* Plus \+ Comma \, +%% Minus \- Point \. Solidus \/ +%% Colon \: Semicolon \; Less than \< +%% Equals \= Greater than \> Question mark \? 
+%% Commercial at \@ Left bracket \[ Backslash \\ +%% Right bracket \] Circumflex \^ Underscore \_ +%% Grave accent \` Left brace \{ Vertical bar \| +%% Right brace \} Tilde \~} +\ProvidesFile{jpconf11.clo}[2005/05/04 v1.0 LaTeX2e file (size option)] +\renewcommand\normalsize{% + \@setfontsize\normalsize\@xipt{13}% + \abovedisplayskip 12\p@ \@plus3\p@ \@minus7\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \belowdisplayskip \abovedisplayskip + \let\@listi\@listI} +\normalsize +\newcommand\small{% + \@setfontsize\small\@xpt{12}% + \abovedisplayskip 11\p@ \@plus3\p@ \@minus6\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6.5\p@ \@plus3.5\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 9\p@ \@plus3\p@ \@minus5\p@ + \parsep 4.5\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip} +\newcommand\footnotesize{% +% \@setfontsize\footnotesize\@xpt\@xiipt + \@setfontsize\footnotesize\@ixpt{11}% + \abovedisplayskip 10\p@ \@plus2\p@ \@minus5\p@ + \abovedisplayshortskip \z@ \@plus3\p@ + \belowdisplayshortskip 6\p@ \@plus3\p@ \@minus3\p@ + \def\@listi{\leftmargin\leftmargini + \topsep 6\p@ \@plus2\p@ \@minus2\p@ + \parsep 3\p@ \@plus2\p@ \@minus\p@ + \itemsep \parsep}% + \belowdisplayskip \abovedisplayskip +} +\newcommand\scriptsize{\@setfontsize\scriptsize\@viiipt{9.5}} +\newcommand\tiny{\@setfontsize\tiny\@vipt\@viipt} +\newcommand\large{\@setfontsize\large\@xivpt{18}} +\newcommand\Large{\@setfontsize\Large\@xviipt{22}} +\newcommand\LARGE{\@setfontsize\LARGE\@xxpt{25}} +\newcommand\huge{\@setfontsize\huge\@xxvpt{30}} +\let\Huge=\huge +\if@twocolumn + \setlength\parindent{14\p@} + \else + \setlength\parindent{18\p@} +\fi +\if@letterpaper% +%\input{letmarg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} 
+\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\else +%\input{a4marg.tex}% +\setlength{\hoffset}{0mm} +\setlength{\marginparsep}{0mm} +\setlength{\marginparwidth}{0mm} +\setlength{\textwidth}{160mm} +\setlength{\oddsidemargin}{-0.4mm} +\setlength{\evensidemargin}{-0.4mm} +\setlength{\voffset}{0mm} +\setlength{\headheight}{8mm} +\setlength{\headsep}{5mm} +\setlength{\footskip}{0mm} +\setlength{\textheight}{230mm} +\setlength{\topmargin}{1.6mm} +\fi +\setlength\maxdepth{.5\topskip} +\setlength\@maxdepth\maxdepth +\setlength\footnotesep{8.4\p@} +\setlength{\skip\footins} {10.8\p@ \@plus 4\p@ \@minus 2\p@} +\setlength\floatsep {14\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\textfloatsep {24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\intextsep {16\p@ \@plus 4\p@ \@minus 4\p@} +\setlength\dblfloatsep {16\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\dbltextfloatsep{24\p@ \@plus 2\p@ \@minus 4\p@} +\setlength\@fptop{0\p@} +\setlength\@fpsep{10\p@ \@plus 1fil} +\setlength\@fpbot{0\p@} +\setlength\@dblfptop{0\p@} +\setlength\@dblfpsep{10\p@ \@plus 1fil} +\setlength\@dblfpbot{0\p@} +\setlength\partopsep{3\p@ \@plus 2\p@ \@minus 2\p@} +\def\@listI{\leftmargin\leftmargini + \parsep=\z@ + \topsep=6\p@ \@plus3\p@ \@minus3\p@ + \itemsep=3\p@ \@plus2\p@ \@minus1\p@} +\let\@listi\@listI +\@listi +\def\@listii {\leftmargin\leftmarginii + \labelwidth\leftmarginii + \advance\labelwidth-\labelsep + \topsep=3\p@ \@plus2\p@ \@minus\p@ + \parsep=\z@ + \itemsep=\parsep} +\def\@listiii{\leftmargin\leftmarginiii + \labelwidth\leftmarginiii + \advance\labelwidth-\labelsep + \topsep=\z@ + \parsep=\z@ + \partopsep=\z@ + \itemsep=\z@} +\def\@listiv {\leftmargin\leftmarginiv + \labelwidth\leftmarginiv + \advance\labelwidth-\labelsep} +\def\@listv{\leftmargin\leftmarginv + \labelwidth\leftmarginv + \advance\labelwidth-\labelsep} +\def\@listvi {\leftmargin\leftmarginvi + 
\labelwidth\leftmarginvi + \advance\labelwidth-\labelsep} +\endinput +%% +%% End of file `iopart12.clo'. diff --git a/contributions/sd_storm2/main.tex b/contributions/sd_storm2/main.tex new file mode 100644 index 0000000000000000000000000000000000000000..4d5f05ccbd270a870fee6c68ead4e66abf5d8eff --- /dev/null +++ b/contributions/sd_storm2/main.tex @@ -0,0 +1,404 @@ +\documentclass[a4paper]{jpconf} + +\usepackage{url} +\usepackage{graphicx} +\usepackage{float} + +\newcommand{\quotes}[1]{``#1''} + +\begin{document} + +\title{StoRM 2: initial design and development activities} + +\author{ + A.~Ceccanti$^1$, + F.~Giacomini$^1$, + E.~Vianello$^1$, + E.~Ronchieri$^1$ +} + +\address{$^1$ INFN-CNAF, Bologna, IT} + +\ead{ + andrea.ceccanti@cnaf.infn.it +} + +\begin{abstract} + StoRM is the storage element solution that powers the CNAF Tier 1 + data center as well as more than 30 other sites. Experience in + developing, maintaining and operating it at scale suggests that a + significant refactoring of the codebase is necessary to improve + StoRM maintainability, reliability, scalability and ease of + operation in order to meet the data management requirements coming + from HL-LHC and other communities served by the CNAF Tier 1 data + center. In this contribution we highlight the initial StoRM 2 + design and development activities. +\end{abstract} + +\section{Introduction} +\label{sec:introduction} + +StoRM was first developed by a joint collaboration between INFN-CNAF, CERN and +ICTP to provide a lightweight storage element solution implementing the +SRM~\cite{ref:srm} interface on top of a POSIX filesystem. StoRM has a layered +architecture (Figure~\ref{fig:storm-arch}), split between two main components: +the StoRM frontend and backend services. The StoRM frontend service implements +the SRM interface exposed to client applications and frameworks. 
The StoRM
+backend service implements the actual storage management logic by interacting
+directly with the underlying file system.
+
+Communication between the frontend and the backend services happens in two ways:
+\begin{itemize}
+ \item via an XML-RPC API, for synchronous requests;
+ \item via a database, for asynchronous requests.
+\end{itemize}
+
+Data transfers are provided by GridFTP, HTTP and XRootD services accessing
+directly the file system underlying the StoRM deployment.
+
+StoRM is interfaced with the IBM Tivoli Storage Manager (TSM) via
+GEMSS~\cite{ref:gemss}, a component also developed at INFN, to provide optimized
+data archiving and tape recall functionality.
+
+The StoRM WebDAV service provides an alternative data management interface
+complementary to the SRM functionality, albeit without supporting tape
+operations yet.
+
+In the past years StoRM has powered the CNAF Tier 1 data center as well as
+dozens of other sites and proved to be a reliable SRM implementation. However,
+ten years of experience in developing and operating the service at scale has
+also shown limitations:
+
+\begin{itemize}
+
+ \item The StoRM code base is not unit-tested; this means that there is no
+ quick feedback loop that functionality is not broken when a change is
+ introduced or a refactoring is implemented; there are integration and load
+ test suites that can be used to assess that functionality is not broken,
+ but these test suites are more complex to instantiate, require a full
+ service deployment and do not provide coverage information.
+
+ \item Data management responsibilities are scattered among several
+ components without clear reasons, increasing maintenance and development
+ costs.
+
+ \item The StoRM backend cannot be horizontally replicated; this causes
+ operational problems in production and limits scalability and the ability
+ to adapt dynamically to load changes.
+ + \item Logging is not harmonized among the StoRM services and limited + tracing is provided, so that it is not trivial to trace the history of an + incoming request across the services. + + \item Core StoRM communication and authentication functionality relies on + dated technologies and libraries (e.g., XML-RPC, CGSI-gSOAP); + + \item The codebase is significantly more complex than needed due to the + inorganic growth and lack of periodic quality assessment performed on the + code base. + +\end{itemize} + +To address these shortcomings, a redesign of the StoRM service has been planned +and started this year, in parallel with the main StoRM maintenance and +development activities. + +\begin{figure} + \centering + \includegraphics[width=.6\textwidth]{storm-arch.png} + \caption{\label{fig:storm-arch}The StoRM 1 architecture.} +\end{figure} + +\section{StoRM 2 high-level architecture} + +The StoRM 2 architecture is depicted in Figure~\ref{fig:storm2-arch}. + +\begin{figure} + \centering + \includegraphics[width=.6\textwidth]{high-level-arch.png} + \caption{\label{fig:storm2-arch}The StoRM 2 high-level architecture.} +\end{figure} + +The layered architecture approach is maintained, so that service logic is again +split between frontend and backend service components. + +The frontend responsibility is to implement the interfaces towards the outside +world. In practice, the frontend is implemented by multiple microservices, +each responsible of a specific interface (SRM, WebDAV, etc.). + +TLS termination and client authentication is implemented at the edge of the +service perimeter by one (or more) Nginx reverse proxy instances. There are +several advantages in this approach: + +\begin{itemize} + + \item The TLS handling load is decoupled from request management load. + + \item VOMS-related configuration and handling is centralized to a single + component, leading to simplified service operation and troubleshooting. 
+
+ \item The TLS terminator becomes a natural place to implement load balancing
+ for the frontend services.
+
+\end{itemize}
+
+VOMS authorization support is provided by an Nginx VOMS
+module~\cite{ref:nginx-voms} developed for this purpose and described in more detail
+in another contribution in this report.
+
+Besides implementing the management protocol endpoints, the frontends expose other
+management and monitoring interfaces that can be consumed by internal services and
+may use a relational or in-memory database to persist state information in support
+of request management and accounting.
+
+Frontends do not directly interact with the storage, but delegate the
+interaction to a backend service.
+
+The backend is a stateless service that implements basic management operations on the
+storage. The storage management operations implemented are the minimum set of
+operations needed to support the data management interfaces exposed by the
+frontends. These operations are typically either data object lifecycle
+operations (e.g., create or remove a file or a directory, list directory contents) or
+metadata operations (e.g., get the size of a file, manage ACLs).
+
+The communication between the frontend and the backend services is implemented
+on top of gRPC~\cite{ref:grpc}, a remote procedure call system initially
+developed at Google. The actual messages exchanged between them are
+synthesized from a description expressed in an interface description language
+called \textit{Protocol Buffers}~\cite{ref:protocol-buffers}; from the same
+message description, language-specific client and server stubs are generated. As
+an example, the following listing shows the description of the messages and of
+the service involved in the simple case of the \textit{version} command.
+
+{\small
+\begin{verbatim}
+message VersionRequest {
+ // The version of the client calling the service.
+ string version = 1;
+}
+
+message VersionResponse {
+ // The version of the service answering the call
+ string version = 1;
+}
+
+service VersionService {
+ rpc getVersion(VersionRequest) returns (VersionResponse);
+}
+\end{verbatim}
+}
+
+\section{Principles guiding the development work}
+
+The following principles have driven the StoRM 2 development work.
+
+\begin{itemize}
+
+ \item The source code will be kept in a Git repository hosted on the INFN
+ Gitlab service; the development will follow a branching model inspired
+ by Git-workflow~\cite{ref:gitflow} and already successfully used for other
+ components developed by the team (e.g., VOMS, INDIGO IAM, StoRM).
+
+ \item The code for all main components (frontend and backend services,
+ CLIs, etc.) will be hosted on a single repository and a single version number
+ will be shared for all the components.
+
+ \item A test-driven development approach will be followed, using tools that
+ allow measuring the test coverage of the codebase. The objective is to
+ ensure high coverage ($>90\%$) on all code.
+
+ \item Whenever possible, the code should be self-documenting; the source code folder
+ structure will be documented with README.md files providing a
+ description of each folder's contents; a CHANGELOG file will provide
+ information on new features and bug fixes following established
+ industry best practices~\cite{ref:keep-a-changelog}.
+
+ \item The development and testing environment will be containerized, in
+ order to ensure a consistent environment definition and avoid ``works on my
+ machine'' issues.
+
+ \item Services should provide monitoring and metrics endpoints to enable the
+ collection of status information and performance metrics.
+
+ \item Services should support graceful shutdown and draining.
+
+ \item A CI pipeline will be in place, to build and test continuously the code.
+ + \item A consistent configuration and logging format will be adopted across + all the components, to make service operations easier and simplify log + files interpretation, aggregation and management. + + \item Support for request traceability will be part of the system since its + inception. + +\end{itemize} + +The development of StoRM 2 will be organized in SCRUM-like sprints, where each +sprint will be roughly 4-5 weeks long. + +The output of each sprint should be a deployable instance of the services +implementing a subset of the whole foreseen StoRM 2 functionality. + +\section{The build and test environment} + +The build environment heavily relies on container technology~\cite{ref:docker}, +both to guarantee full build and test reproducibility and to offer a common +reference platform for development. + +Since the code for all components is kept in a single git repository, we have +also opted for a single Docker image to build everything, containing all the +needed build tools (compilers, unit testing frameworks, static and dynamic +analyzers, external dependencies, etc.). The resulting image is large but still +manageable and having one image simplifies the operations. + +There are also a couple of other Docker images: one is a specialization of the +build image mentioned above and is dedicated to the build of the Nginx VOMS +module; the other is an image with client tools used during integration testing. + +All the image Dockerfiles are kept in a single repository, under continuous +integration, so that every time there is a change the images are rebuilt. + +\section{The StoRM 2 frontend component} + +The StoRM 2 frontend is composed of a set of stateless Spring Boot 2 +applications written in Java that implement the management protocol endpoints, +such as SRM~\cite{ref:srm} and WebDAV~\cite{ref:webdav}. The frontend services +maintain state in an external database. 
+ +The main frontend responsibilities are to: +\begin{itemize} + + \item implement consistent authorization, taking as input the + authentication information exposed by the Nginx TLS terminator and + matching this information with a common authorization policy; + + \item implement request validation and management, i.e., + protocol-specific management of request queuing as well as conflict + handling; + + \item translate protocol-specific requests to a set of basic storage + management operations executed by the backend and exposed via a set of + gRPC services; + + \item provide service management and observability endpoints, to allow + administrators to get information about the requests currently being + serviced by the system, drain the service or manually force request status + transitions. + +\end{itemize} + +The first frontend service developed in StoRM 2 focuses on the SRM interface, +and at the time of this writing implements support for the SRM \textit{ping} and +\textit{ls} methods. + +In the initial development sprints, significant work has been devoted to ensure +the testability of the frontend component in isolation, by leveraging the +powerful testing support provided by Spring~\cite{ref:spring} and the gRPC +frameworks. + +\section{The StoRM 2 backend component} + +The StoRM 2 backend is a gRPC server that provides multiple +services. One service responds to \textit{version} requests. Another +service responds to storage-related requests, which represent the main +scope of StoRM. In general there is no direct, one-to-one mapping +between SRM requests arriving at the frontend and requests addressed +to the backend; rather, these represent building blocks that the +frontend can compose in order to prepare the responses to SRM clients. 
+ +Among the storage requests addressed to the backend, at the moment +only a couple are implemented: \textit{ls}, in its multiple variations +(for a file or a directory, recursive, up to a given depth, etc.), +returns information about files and directories; \textit{pin}, +\textit{unpin} and \textit{pin status} manage the +\verb|user.storm.pinned| attribute of filesystem entities, which is +essential for the implementation of the more complex +\textit{srmPrepareToGet} SRM request. + +All the backend requests are currently blocking: a response is sent +back to the frontend only when the request has been fully processed. + +The backend also incorporates sub-components of more general utility +to operate on Filesystem Extended Attributes and POSIX Access Control +Lists~\cite{p1003.1e}, adding a layer of safety and expressivity on +top of the native C APIs. They allow to define attributes and ACLs +respectively and to apply them to or read them from filesystem +entities. + +For example the following sets the attribute \verb|user.storm.pinned| +of file \verb|myFile.txt| to the pin duration: + +{\small +\begin{verbatim} +set_xattr( + storage_dir / "myFile.txt", + StormXAttrName{"pinned"}, + XAttrValue{duration} +); +\end{verbatim} +} + +The following instead extends the ACL currently assigned to +\verb|myFile.txt| with some additional entries: + +{\small +\begin{verbatim} +add_to_access_acl( + storage_dir / "myFile.txt", + { + {User{"storm"}, Perms::Read | Perms::Write}, + {Group{"storm"}, Perms::Read}, + {other, Perms::None} + } +); +\end{verbatim} +} + +The backend is implemented in C++, in the latest standard version +supported by the toolset installed in the reference platform +(currently C++17). The build system is based on CMake. 
+ +The backend relies on some other third-party dependencies, the most +important being for interaction with the filesystem (Boost +Filesystem~\cite{ref:boost.fs}), for logging (Boost +Log~\cite{ref:boost.log}) and for handling configuration +(yaml-cpp~\cite{ref:yaml-cpp}). + +\section{Test suite and continuous integration} + +The test suite is based on the Robot Framework~\cite{ref:rf} and is typically +run in a Docker container. A deployment test pipeline~\cite{ref:glcip} runs on +our Gitlab-based continuous integration (CI) system every night (and after any +commit on the master branch) to instantiate the main StoRM 2 services and +execute the SRM testsuite. The reports of the test suite execution are archived +and published on the Gitlab CI dashboard. Services and the test suite are +orchestrated using Docker Compose~\cite{ref:dc}. This approach provides an +intuitive, self-contained testing environment deployable on the CI system and on +the developers' workstations. + +The test deployment mirrors the architecture shown in +Figure~\ref{fig:storm-arch}, with clients and services placed in different +Docker networks to mimic a real-life deployment scenario. + +\section{Conclusions and future work} + +In this contribution we have described the initial design and development +activities performed during 2018 on StoRM 2, the next incarnation of the StoRM +storage management system. + +The main objective of the StoRM refactoring is to improve the service +scalability and manageability in order to meet the data management requirements +of HL-LHC. The initial work of this year focused on choosing tools, +methodologies and approach with a strong emphasis on software quality. + +In the future we will build on this groundwork to provide a full replacement +for the existing StoRM implementation. The lack of dedicated manpower for this +activity makes it hard to estimate when StoRM 2 will be ready to be deployed in +production.
+ +\section*{References} + +\bibliographystyle{iopart-num} +\bibliography{biblio} + +\end{document} diff --git a/contributions/sd_storm2/storm-arch.png b/contributions/sd_storm2/storm-arch.png new file mode 100644 index 0000000000000000000000000000000000000000..75571801d055082e18c5b04b20d9ecaf84801e1f Binary files /dev/null and b/contributions/sd_storm2/storm-arch.png differ diff --git a/contributions/sdds-deep/SDDS-DEEP.tex b/contributions/sdds-deep/SDDS-DEEP.tex index ab9e7d3234b64fa1c68968b4de857e1ee4e4a823..ef5fdee5a77b879d6d4442fada8bd5f8fc64ef3f 100644 --- a/contributions/sdds-deep/SDDS-DEEP.tex +++ b/contributions/sdds-deep/SDDS-DEEP.tex @@ -10,7 +10,6 @@ \address{$^1$ INFN-CNAF, Bologna, Italy} - \ead{alessandro.costantini@cnaf.infn.it} \begin{abstract} diff --git a/contributions/storage/storage.tex b/contributions/storage/storage.tex index 77738ad7c5a039cf1120f711a15d02e8f0c18dd8..9a4adf4b6245a8b533bc01a149eca0dcf5cbb672 100644 --- a/contributions/storage/storage.tex +++ b/contributions/storage/storage.tex @@ -17,8 +17,8 @@ \begin{document} \title{Data management and storage systems} -\author{A. Cavalli, D. Cesini, A. Falabella, E. Fattibene, L.Morganti, A. Prosperini and V. Sapunenko} -\address{INFN-CNAF, Bologna, IT} +\author{A. Cavalli$^1$, D. Cesini$^1$, A. Falabella$^1$, E. Fattibene$^1$, L.Morganti$^1$, A. Prosperini$^1$, V. Sapunenko$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{vladimir.sapunenko@cnaf.infn.it} @@ -69,7 +69,7 @@ A list of storage systems in production as of 31.12.2018 is given in Table \ref{ The first three months of 2018 were completely dedicated to recovery of the hardware and restoring of the services after the flood event which happened on November $9^{th}$ 2017. -At that time, the Tier-1 storage at CNAF consisted of the resources listed in Table \ref{table:1}. Almost all storage resources were damaged or contaminated by dirty water. 
+At that time, the Tier 1 storage at CNAF consisted of the resources listed in Table \ref{table:1}. Almost all storage resources were damaged or contaminated by dirty water. \begin{table}[h!] \centering @@ -146,7 +146,7 @@ In total 15 servers were damaged by contact with water, mainly by leak of acid f Also, three Fiber Channel switches were affected by the flood: Brocade 48000 (384 ports) and two Brocade 5300 (96 ports each). All three switches were successfully recovered after cleaning and replacement of power supply modules. \subsection{Results of hardware recovery} -At the end, after the restart of the Tier1 data center, we have completely recovered all services and most part of the HW, as described in the following Table \ref{table:2}. +At the end, after the restart of the Tier 1 data center, we have completely recovered all services and most part of the HW, as described in the following Table \ref{table:2}. \begin{table}[h!] @@ -183,7 +183,7 @@ We are trying to keep all our infrastructures redundant: the dual-path connectio The StoRM instances have been virtualized both allowing the implementation of HA. \section{GEMSS} -GEMSS is the Mass Storage System used at the Tier-1, a full HSM integration of the General Parallel File System (GPFS), the Tivoli Storage Manager (TSM), both from IBM, and StoRM (developed at INFN); its primary advantages are a high reliability and a low effort needed for its operation. +GEMSS is the Mass Storage System used at the Tier 1, a full HSM integration of the General Parallel File System (GPFS), the Tivoli Storage Manager (TSM), both from IBM, and StoRM (developed at INFN); its primary advantages are a high reliability and a low effort needed for its operation. The GPFS and TSM interaction is the main component of the GEMSS system: a thin software layer has been developed in order to optimize the migration (disk to tape data flow) and, in particular, the recall (tape to disk data flow) operations. 
@@ -220,7 +220,7 @@ Work is ongoing to verify the availability and the correctness of all CDF data s \section{Third Party Copy activities in DOMA} -At the end of the summer, we joined the TPC (Third Party Copy) subgroup of the WLCG’s DOMA\footnote{Data Organization, Management, and Access. see https://twiki.cern.ch/twiki/bin/view/LCG/DomaActivities} project, dedicated to improving bulk transfers between WLCG sites using non-GridFTP protocols. In particular, the INFN-Tier1 is involved in these activities for what concerns StoRM WebDAV. +At the end of the summer, we joined the TPC (Third Party Copy) subgroup of the WLCG’s DOMA\footnote{Data Organization, Management, and Access. see https://twiki.cern.ch/twiki/bin/view/LCG/DomaActivities} project, dedicated to improving bulk transfers between WLCG sites using non-GridFTP protocols. In particular, the INFN-Tier 1 is involved in these activities for what concerns StoRM WebDAV. In October, the two StoRM WebDAV servers used in production by the ATLAS experiment have been upgraded to a version that implements basic support for Third-Party-Copy, and both endpoints entered the distributed TPC testbed of volunteer sites. 
diff --git a/contributions/summerstudent/.gitkeep b/contributions/summerstudent/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/contributions/summerstudent/MLalgorithms.png b/contributions/summerstudent/MLalgorithms.png new file mode 100644 index 0000000000000000000000000000000000000000..0ba828b3357fb8aada482c5b6cc4e7b219341744 Binary files /dev/null and b/contributions/summerstudent/MLalgorithms.png differ diff --git a/contributions/summerstudent/StoRM-full-picture.png b/contributions/summerstudent/StoRM-full-picture.png new file mode 100644 index 0000000000000000000000000000000000000000..5057de7a47b71df4576528d034b321795f1a5ca4 Binary files /dev/null and b/contributions/summerstudent/StoRM-full-picture.png differ diff --git a/contributions/summerstudent/StoRM.png b/contributions/summerstudent/StoRM.png new file mode 100644 index 0000000000000000000000000000000000000000..abc85b570764f94b51650a5973f1664def268ba0 Binary files /dev/null and b/contributions/summerstudent/StoRM.png differ diff --git a/contributions/summerstudent/kibana.png b/contributions/summerstudent/kibana.png new file mode 100644 index 0000000000000000000000000000000000000000..ce7d4848491386c4fef93704d87aaaf50024c500 Binary files /dev/null and b/contributions/summerstudent/kibana.png differ diff --git a/contributions/summerstudent/summerstudent.tex b/contributions/summerstudent/summerstudent.tex new file mode 100644 index 0000000000000000000000000000000000000000..c8dd0de3d48d3dccd888bd327ed302d4b874c1ac --- /dev/null +++ b/contributions/summerstudent/summerstudent.tex @@ -0,0 +1,90 @@ +\documentclass[a4paper]{jpconf} +\usepackage{graphicx} +\begin{document} +\title{INFN CNAF log analysis: a first experience with summer students} + +\author{D. Bonacorsi$^1$, A. Ceccanti$^2$, T. Diotalevi$^1$, A. Falabella$^2$, L. Giommi$^2$, B. Martelli$^2$, D. Michelotto$^2$, L. Morganti$^2$, S. Rossi Tisbeni$^1$, E. 
Vianello$^2$} + +\address{$^1$ University of Bologna, Bologna, IT} +\address{$^2$ INFN-CNAF, Bologna, IT} + +\ead{barbara.martelli@cnaf.infn.it} + +\begin{abstract} +In 2018 the INFN CNAF computing center has started to investigate predictive and preventive maintenance solutions in order to improve fault diagnosis by applying machine learning techniques to hardware and service logs. An excellent experience has been carried out by three students who dedicated three summer months to collect logs of the StoRM services and the resources that host them, to preprocess these logs in order to remove all bias information and to perform initial data analysis. Here we are going to present the activities fulfilled by these students, the initial outcome and the ongoing work at the INFN CNAF data center. +\end{abstract} + +\section{Introduction} +In recent years INFN CNAF has put a great effort to define and implement a common monitoring infrastructure based on Sensu, InfluxDB and Grafana and to centralize logs from the most relevant services \cite{bovina2015, bovina2017}. Nowadays, this unified infrastructure has been fully integrated in the data center \cite{fattibene2018} and there is the intention to face the new challenge/opportunity to correlate this vast volume of data and extract actionable insights. + +During the summer 2018 a first investigation has been carried out with the help of three summer students \cite{seminario}. Once a specific system to analyze had been identified, i.e. StoRM, the following activities have been addressed: +\begin{itemize} +\item Log collection and harmonization +\item Log parsing of various services, such as StoRMfrontend, StoRMbackend, heartbeat, messages, GridFTP and GPFS (not covered in our study, but potentially interesting) +\item Metrics data adding (from Tier 1 InfluxDB) +\end{itemize} + +However, to provide a first proof of concept for the predictive and preventive maintenance, data categorization and machine learning techniques application represent two key points that have been conducted between the end of 2018 and the middle of 2019. + +\section{Log collection and harmonization} +The first part of the work consisted in the collection of StoRM logs from the StoRM servers dedicated to the Atlas experiment. + +Subsequently, most relevant information was extracted from the logs using the ELK Stack suite \cite{elk}. The ELK stack consists of four components: Beats used for data collection from multiple sources, Logstash used for data aggregation and processing, Elasticsearch used to store and index data, Kibana for data analysis and visualization. In particular, Logstash has been used to ingest data from Beats in a continuous live-feed streaming, filter relevant entries and parse each event, identifying named fields to build a user defined structure and ship parsed data to the Elasticsearch engine. Most data was filtered using a \textit{grok} filter which is based on regular expressions and provides predefined filters together with the ability of defining customized ones. +Finally, several dashboards were created using Kibana in order to show in a human-friendly way a summary of the most relevant information derived from StoRM logs (see for example Figure~\ref{fig3}).
+\begin{figure}[h] +\includegraphics[width=20pc]{kibana.png}\hspace{2pc} +\begin{minipage}[b]{14pc}\caption{\label{fig3}An example of Kibana dashboard created.} +\end{minipage} +\end{figure} + + +\section{Log parsing} +Among the INFN Tier 1 services hosted at the INFN CNAF computing center, there are efficient storage systems, like StoRM that is a grid Storage Resource Manager (SRM) solution. Figure \ref{fig1} shows the StoRM architecture: the frontend service manages user authentication and stores requests data, while the backend service executes SRM functionalities and takes care of space and authorization. +The log files basically contain three types of information: timestamp, metrics, and messages. +\begin{figure}[h] +\includegraphics[width=20pc]{StoRM-full-picture.png}\hspace{2pc}% +\begin{minipage}[b]{14pc}\caption{\label{fig1}The StoRM architecture.} +\end{minipage} +\end{figure} + +At the beginning of this work (mid 2018), StoRM at Tier 1 was monitored by InfluxDB and Grafana. Metrics monitored included CPU, RAM, network and disk usage; number of sync SRM requests per minute per host; duration of async PTG and PTP per host (avg). We wanted to add information derived from the analysis of StoRM logs to already available monitoring information, in order to derive new insights potentially useful to enhance service availability and efficiency with the long-term intent of implementing a global predictive maintenance solution for Tier 1. In order to build a Machine Learning model for anomaly prediction, logs from two different periods were analyzed: a normal behavior period and a critical behavior period (due to wrong configuration of the file system and wrong configuration of the queues coming from the farm).
+A four-step activity has been carried out: +\begin{enumerate} +\item Parsing: log files were parsed and deconstructed, converting them to CSV format +\item Feature selection: was done by grouping messages based on their common content (core part of the message). The grouping phase resulted in 20 \textit{Request Types} (Connection, Run, Ping, Ls, Check permission, PTG, PTG status, Get space tokens, PTP, PTP status, BOL status, Put done, Release files, Mv, Mkdir, BOL, Abort request, Abort files, Get space metadata, nan) and 15 \textit{Result Types} (SRM\_SUCCESS, SRM\_FAILURE, SRM\_NOT\_SUPPORTED, SRM\_REQUEST\_QUEUED, SRM\_REQUEST\_INPROGRESS, Protocol check failed, Received 4 protocols, Some protocols supported, SRM\_DUPLICATION\_ERROR, rpcResponseHandler\_AbortFiles, SRM\_INVALID\_REQUEST, SRM\_INVALID\_PATH, Received 5 protocols, SRM\_INTERNAL\_ERROR, nan). A first data exploration phase was performed by counting occurrences of messages in each group. +Techniques used for the feature selection procedure were: SelectKBest with the chi-squared statistical test, Recursive Feature Elimination, Principal Component Analysis (PCA) and Feature Importance from ensembles of decision tree methods. +\item One-hot encoding: CSV rows encoded in binary vectors (feature vectors). Each vector represents the summary of 15-minutes log contents. +\item Labelling: operation specific for StoRM log files done manually discriminating between normal and critical period based on help-desk tickets. +\end{enumerate} +Feature vectors obtained in (iii) and labeled datasets built in (iv) were used to train several ML algorithms and to test their accuracy. Figure \ref{fig2} depicts the results of tests performed on the following algorithms: LogisticRegression (LR), LinearDiscriminantAnalysis (LDA), KNeighborsClassifier (KNN), GaussianNB (GNB), DecisionTreeClassifier (CART), BaggingClassifier (BgDT), RandomForestClassifier (RF), ExtraTreesClassifier (ET), AdaBoostClassifier (AB), GradientBoostingClassifier (GB), XGBoostClassifier (XGB), MultiLayerPerceptronClassifier (MLP). + +\begin{center} +\begin{figure}[h] +\includegraphics[width=20pc]{MLalgorithms.png}\hspace{2pc} +\begin{minipage}[b]{14pc}\caption{\label{fig2}Machine Learning Algorithms Comparison (scorer=accuracy).} +\end{minipage} +\end{figure} +\end{center} + +\section{Metrics data adding} +This activity was mainly focused on collecting metric data from InfluxDB in order to put them in relation with StoRM logs obtained with activities explained in previous sections and extract new insights. +Key components of log files were identified, parsed and structured in a CSV file with the following columns: timestamp, metric, message, descriptive keys and separators. All timestamps were converted to UNIX epoch time in order to be comparable. On one side, InfluxDB stores information with different granularity depending on the age of data collected and on the other side, StoRM front-end and back-end logs are produced with different frequencies (one line each minute for heartbeat logs, multiple lines every minute for metrics logs, one line every five minutes for InfluxDB more recent data, and so on). Therefore, some concatenation rules have been implemented in order to correctly put in relation all data sources based on the time of occurrence of the event: backend metrics are split by type, timestamp is rounded off to one-minute precision, in case of overlap the more recent is kept and every CSV file is concatenated and ordered by timestamp.
+ +\section{Conclusion} +This experience is a good example of mutually beneficial collaboration between university students and INFN CNAF. The outcome has allowed master students (i.e. Diotalevi T. and Giommi L.) to publish papers at international conferences \cite{diotalevi, giommi20191}, to win Giulia Vita Finzi's award \cite{giommi20192}, and to start their PhD courses with success. Furthermore, the undergraduate student (i.e. Rossi Tisbeni S.) will hold a master's degree in Physics in July 2019. On the other hand, the INFN CNAF data center managers have decided to continue exploiting predictive and preventive maintenance to establish where and when to use it to keep services running optimally. + +\section*{References} +\begin{thebibliography}{9} +\bibitem{seminario} Martelli B, Giommi L, Rossi Tisbeni S, Diotalevi T, https://agenda.infn.it/event/17430/, 2018. +\bibitem{bovina2015} Bovina S, Michelotto D, Misurelli G, \emph{CNAF Annual Report}, pp. 111--114, 2015. +\bibitem{bovina2017} Bovina S, Michelotto D, In Proc of CHEP 2017. +\bibitem{fattibene2018} Fattibene E, Dal Pra S, Falabella A, De Cristofaro T, Cincinelli G, Ruini M, In Proc of CHEP 2018. +\bibitem{diotalevi} Diotalevi T, Bonacorsi D, Michelotto D, Falabella A, In Proc of International Symposium on Grids \& Clouds (ISGC), Taipei, Taiwan, 2019 (under review). +\bibitem{giommi20191} Giommi L, Bonacorsi D, Diotalevi T, Rossi Tisbeni S, Rinaldi L, Morganti L, Falabella A, Ronchieri E, Ceccanti A, Martelli B, In Proc of International Symposium on Grids \& Clouds (ISGC), Taipei, Taiwan, 2019 (under review). +\bibitem{giommi20192} Giommi L, In INFN CCR Workshop, La Biodola, 3-7 June 2019. +\bibitem{elk}https://www.elastic.co/, site visited on June 2019.
+\end{thebibliography} + +\end{document} + + diff --git a/contributions/sysinfo/sysinfo.tex b/contributions/sysinfo/sysinfo.tex index 68ec306ed040439ade60ecdef6296dd20e410662..b84777c85f288336171fb655861aa1adce95127a 100644 --- a/contributions/sysinfo/sysinfo.tex +++ b/contributions/sysinfo/sysinfo.tex @@ -6,16 +6,16 @@ \title{The INFN Information System} \author{ - Stefano Bovina$^1$, - Marco Canaparo$^1$, - Enrico Capannini$^1$, - Fabio Capannini$^1$, - Claudio Galli$^1$, - Guido Guizzunti$^1$, - Barbara Demin$^1$ + S. Bovina$^1$, + M. Canaparo$^1$, + E. Capannini$^1$, + F. Capannini$^1$, + C. Galli$^1$, + G. Guizzunti$^1$, + B. Demin$^1$ } -\address{$^1$ INFN CNAF, Viale Berti Pichat 6/2, 40126, Bologna, Italy} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{ stefano.bovina@cnaf.infn.it, diff --git a/contributions/tier1/tier1.tex b/contributions/tier1/tier1.tex index e6966cdd558c77e801c928d7bb8c729594fc41b4..f046928dee8f958c7d5402cb8c24e22cde8a4f01 100644 --- a/contributions/tier1/tier1.tex +++ b/contributions/tier1/tier1.tex @@ -13,16 +13,16 @@ \begin{document} -\title{The INFN Tier-1} +\title{The INFN Tier 1} -\author{Luca dell'Agnello} -\address{INFN-CNAF, Bologna, IT} +\author{Luca dell'Agnello$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{luca.dellagnello@cnaf.infn.it} \section{Introduction} -CNAF hosts the Italian Tier-1 data center for WLCG: over the years, Tier-1 has become the main computing facility for INFN. -Nowadays, besides the four LHC experiments, the INFN Tier-1 provides services and resources to 30 other scientific collaborations, including BELLE2 and several astro-particle experiments (Tab.\ref{T1-pledge})\footnote{CSN 1, CSN 2 and CSN 3 are the National Scientific Committees of the INFN, respectively, for experiments in high energy physics with accelerators, astro-particle experiments and experiments in nuclear physics with accelerators.}. As showns in Fig.~\ref{pledge2018}, besides LHC, the main users are the astro-particle experiments. 
+CNAF hosts the Italian Tier 1 data center for WLCG: over the years, Tier 1 has become the main computing facility for INFN. +Nowadays, besides the four LHC experiments, the INFN Tier 1 provides services and resources to 30 other scientific collaborations, including BELLE2 and several astro-particle experiments (Tab.\ref{T1-pledge})\footnote{CSN 1, CSN 2 and CSN 3 are the National Scientific Committees of the INFN, respectively, for experiments in high energy physics with accelerators, astro-particle experiments and experiments in nuclear physics with accelerators.}. As showns in Fig.~\ref{pledge2018}, besides LHC, the main users are the astro-particle experiments. \begin{figure}[h] @@ -40,7 +40,7 @@ Nowadays, besides the four LHC experiments, the INFN Tier-1 provides services an \begin{center} \includegraphics[width=15pc]{tape2018.png}\hspace{2pc}% % \caption{\label{tape2018}xxx} - \caption{\label{pledge2018}Relative requests of resources at INFN Tier-1} + \caption{\label{pledge2018}Relative requests of resources at INFN Tier 1} \end{center} \end{minipage}\hspace{2pc}% \end{center} @@ -166,7 +166,7 @@ Despite the flooding that occurred at the end of 2017, we were able to provide t \br \end{tabular} \end{center} - \caption{Pledged and installed resources at INFN Tier-1 in 2018 (for the CPU power an overlap factor is applied)} + \caption{Pledged and installed resources at INFN Tier 1 in 2018 (for the CPU power an overlap factor is applied)} \label{T1-pledge} \hfill \end{table} @@ -195,17 +195,17 @@ In fact, it was believed that the only threat due to water could come from a ver The post-mortem analysis showed that the causes, beside the breaking of the tube, are to be found in the unfavorable position (2 underground levels) and in the excessive permeability of the perimeter (while the anti-flood doors worked). 
Therefore, an intervention has been carried out to increase the waterproofing of the data center and, moreover, work is planned for summer 2019 to strengthen the perimeter of the building and build a second water collection tank. Even if the search for a new location to move the data center had started before the flooding (the main drive being its limited expandability not able to cope with the foreseen requirements for HL-LHC era when we should scale up to 10 MW of power for IT), the flooding gave us a second strong reason to move. -An opportunity is given by the new ECMWF center which will be hosted in Bologna, in a new Technopole area, starting from 2019. In the same area the INFN Tier-1 and the CINECA computing centers can be hosted too: funding has been guaranteed to INFN and CINECA by the Italian Government for this. The goal is to have the new data center for the INFN Tier-1 fully operational by the end of 2021. +An opportunity is given by the new ECMWF center which will be hosted in Bologna, in a new Technopole area, starting from 2019. In the same area the INFN Tier 1 and the CINECA computing centers can be hosted too: funding has been guaranteed to INFN and CINECA by the Italian Government for this. The goal is to have the new data center for the INFN Tier 1 fully operational by the end of 2021. -\section{INFN Tier-1 extension at CINECA}\label{CINECAext} +\section{INFN Tier 1 extension at CINECA}\label{CINECAext} As mentioned in the previous Paragraph, part of the farm is hosted at CINECA\footnote{CINECA is the Italian Supercomputing center, also located near Bologna ($\sim17$ far km from CNAF). See \url{http://www.cineca.it/}}. Out of the 400 kHS06 CPU power (340 kHS06 pledged) of the CNAF farm, $\sim180$ are provided by servers installed in the CINECA data center. %Each server is equipped with a 10 Gbit uplink connection to the rack switch while each of them, in turn, is connected to the aggregation router with 4x40 Gbit links. 
-The logical network of the farm partition at CINECA is set as an extension of INFN Tier-1 LAN: a dedicated fiber couple interconnects the aggregation router at CINECA with the core switch at the INFN Tier-1 (see Farm and Network Chapters for more details). %Fig.~\ref{cineca-t1}). +The logical network of the farm partition at CINECA is set as an extension of INFN Tier 1 LAN: a dedicated fiber couple interconnects the aggregation router at CINECA with the core switch at the INFN Tier 1 (see Farm and Network Chapters for more details). %Fig.~\ref{cineca-t1}). %The transmission on the fiber is managed by a couple of Infinera DCI, allowing to have a logical channel up to 1.2 Tbps (currently it is configured to transmit up to 400 Gbps). %\begin{figure} % % \begin{minipage}[b]{0.45\textwidth} @@ -227,7 +227,7 @@ Since this partition have been installed from the beginning with CentOS 7, legac \section*{References} \begin{thebibliography}{9} -\bibitem{FLOODCHEP} L. dell'Agnello, "Disaster recovery of the INFN Tier-1 data center: lesson learned" to be published in Proceedings of the 23rd International Conference on Computing in High Energy and Nuclear Physics - EPJ Web of Conferences +\bibitem{FLOODCHEP} L. dell'Agnello, "Disaster recovery of the INFN Tier 1 data center: lesson learned" to be published in Proceedings of the 23rd International Conference on Computing in High Energy and Nuclear Physics - EPJ Web of Conferences \bibitem{singularity} \url{http://singularity.lbl.gov} \end{thebibliography} diff --git a/contributions/user-support/main.tex b/contributions/user-support/main.tex index 09cd87d5a8872454bfe7775cb7702f9419b4667f..b7ecaf6f955e0876fb8de4e4356491bf13955ceb 100644 --- a/contributions/user-support/main.tex +++ b/contributions/user-support/main.tex @@ -2,34 +2,34 @@ \usepackage{graphicx} \begin{document} \title{User and Operational Support at CNAF} -\author{D. Cesini, E. Corni, F. Fornari, L. Morganti, C. Pellegrino, M. V. P. Soares, M. Tenti, L. 
Dell'Agnello} -\address{INFN-CNAF, Bologna, IT} +\author{D. Cesini$^1$, E. Corni$^1$, F. Fornari$^1$, L. Morganti$^1$, C. Pellegrino$^1$, M. V. P. Soares$^1$, M. Tenti$^1$, L. Dell'Agnello$^1$} +\address{$^1$ INFN-CNAF, Bologna, IT} \ead{user-support@lists.cnaf.infn.it} \begin{abstract} Many different research groups, typically organized in Virtual Organizations (VOs), -exploit the Tier-1 Data center facilities for computing and/or data storage and management. Moreover, CNAF hosts two small HPC farms and a Cloud infrastructure. The User Support unit provides to the users of all CNAF facilities with a direct operational support, and promotes common technologies and best-practices to access the ICT resources in order to facilitate the usage of the center and maximize its efficiency. +exploit the Tier 1 Data center facilities for computing and/or data storage and management. Moreover, CNAF hosts two small HPC farms and a Cloud infrastructure. The User Support unit provides to the users of all CNAF facilities with a direct operational support, and promotes common technologies and best-practices to access the ICT resources in order to facilitate the usage of the center and maximize its efficiency. \end{abstract} \section{Current status} Born in April 2012, the User Support team in 2018 was composed by one coordinator and up to five fellows with post-doctoral education or equivalent work experience in scientific research or computing. The main activities of the team include: \begin{itemize} \item providing a prompt feedback to VO-specific issues via ticketing systems or official mail channels; -\item forwarding to the appropriate Tier-1 units those requests which cannot be autonomously satisfied, and taking care of answers and fixes, e.g. via the tracker JIRA, until a solution is delivered to the experiments; +\item forwarding to the appropriate Tier 1 units those requests which cannot be autonomously satisfied, and taking care of answers and fixes, e.g. 
via the tracker JIRA, until a solution is delivered to the experiments; \item supporting the experiments in the definition and debugging of computing models in distributed and Cloud environments; \item helping the supported experiments by developing code, monitoring frameworks and writing guides and documentation for users (see e.g. https://www.cnaf.infn.it/en/users-faqs/); \item solving issues on experiment software installation, access problems, new accounts creation and any other daily usage problems; \item porting applications to new parallel architectures (e.g. GPUs and HPC farms); -\item providing the Tier-1 Run Coordinator, who represents CNAF at the Daily WLCG calls, and reports about resource usage and problems at the monthly meeting of the Tier-1 management body (Comitato di Gestione del Tier-1). +\item providing the Tier 1 Run Coordinator, who represents CNAF at the Daily WLCG calls, and reports about resource usage and problems at the monthly meeting of the Tier 1 management body (Comitato di Gestione del Tier 1). \end{itemize} -People belonging to the User Support team represent INFN Tier-1 inside the VOs. +People belonging to the User Support team represent INFN Tier 1 inside the VOs. In some cases, they are directly integrated in the supported experiments. Moreover, they can play the role of a member of any VO for debugging purposes. The User Support staff is also involved in different CNAF internal projects, notably the Computing on SoC Architectures (COSA) project (www.cosa-project.it) dedicated to the technology tracking and benchmarking of the modern low-power architectures for computing applications. \section{Supported experiments} -The LHC experiments represent the main users of the data center, handling more than 80\% of the total computing and storage resources funded at CNAF. 
Besides the four LHC experiments (ALICE, ATLAS, CMS, LHCb) for which CNAF acts as Tier-1 site, the data center also supports an ever increasing number of experiments from the Astrophysics, Astroparticle physics and High Energy Physics domains, and specifically Agata, AMS-02, Argo-YBJ, Auger, Belle II, Borexino, CDF, Compass, COSMO-WNEXT CTA, Cuore, Cupid, Dampe, DarkSide-50, Enubet, Famu, Fazia, Fermi-LAT, Gerda, Icarus, LHAASO, LHCf, Limadou, Juno, Kloe, KM3Net, Magic, NA62, Newchim, NEWS, NTOP, Opera, Padme, Pamela, Panda, Virgo, and XENON. +The LHC experiments represent the main users of the data center, handling more than 80\% of the total computing and storage resources funded at CNAF. Besides the four LHC experiments (ALICE, ATLAS, CMS, LHCb) for which CNAF acts as Tier 1 site, the data center also supports an ever increasing number of experiments from the Astrophysics, Astroparticle physics and High Energy Physics domains, and specifically Agata, AMS-02, Auger, Belle II, Borexino, CDF, Compass, COSMO-WNEXT CTA, Cuore, Cupid, Dampe, DarkSide-50, Enubet, Famu, Fazia, Fermi-LAT, Gerda, Icarus, LHAASO, LHCf, Limadou, Juno, Kloe, KM3Net, Magic, NA62, Newchim, NEWS, NTOP, Opera, Padme, Pamela, Panda, Virgo, and XENON. Clearly, a bigger effort from the User Support team is needed to answer to the varied and diverse needs from these no-LHC experiments and to encourage them to adopt more modern technologies, e.g. FTS, Dirac, token-based authorization. \begin{figure}[ht] @@ -60,12 +60,13 @@ The following figures show resources pledged and used by the supported experimen Unfortunately, the accounting data for storage, both disk and tape statistics, are available only after summer 2018, given the restoration of the complex system of sensors for accounting after the 2017 flooding had a lower priority with respect to activities needed for a complete of the storage resources involved in the flood. 
\section{Support to HPC and cloud-based experiment} -Apart from Tier-1 facilities, CNAF hosts two small HPC farms and a cloud infrastructure. The first HPC cluster, in production since 2015, is composed of 27 nodes, some of them also equipped with one or more GPUs (NVIDIA Tesla K20, K40 and K1). All nodes are infiniband interconnected and equipped with 2 Intel CPUs, 8 physical cores each, HyperThread enabled. The cluster is accessible via the LSF batch system. It is open to various INFN communities, but the main users are theoretical physicist dealing with plasma laser acceleration simulations. The cluster serves as testing infrastructure to prepare the high resolution runs submitted to supercomputers. +Apart from Tier 1 facilities, CNAF hosts two small HPC farms and a cloud infrastructure. The first HPC cluster, in production since 2015, is composed of 27 nodes, some of them also equipped with one or more GPUs (NVIDIA Tesla K20, K40 and K1). All nodes are infiniband interconnected and equipped with 2 Intel CPUs, 8 physical cores each, HyperThread enabled. The cluster is accessible via the LSF batch system. It is open to various INFN communities, but the main users are theoretical physicists dealing with plasma laser acceleration simulations. The cluster is used as a testing infrastructure to prepare the high resolution runs to be submitted afterwards to supercomputers. -A second HPC cluster entered into production in 2017 to serve the CERN accelerators R/D groups. The cluster consists of 12 nodes OmniPath interconnected. Can be access through batch queues managed by the IBM LSF system. +A second HPC cluster entered into production in 2017 to serve the CERN accelerators R/D groups. The cluster consists of 12 nodes OmniPath interconnected. It can be accessed through batch queues managed by the IBM LSF system. The support is provided on a daily base for what concerns software installation, access problems, new accounts creation and any other usage problems. 
-The User Support team manages an OpenStack-based tenant hosted within the Cloud@CNAF. This tenant, provided with 300 vCPUs, is mostly devoted to support peculiar use cases which require unusual software configurations and only for a limited amount of time. The most important of these use cases is the FAZIA experiment, for which 256 vCPUs were provided, distributed over 16 worker nodes with 8GB of RAM each, where the Debian 8.4 operating system has been installed and configured with LDAP+Kerberos for user authentication and authorization, and NFS 4 for network storage sharing. Recently, other experiments started accessing the Cloud infrastructure: AMS, EEE, FAZIA, Icarus and NTOF. +The User Support team manages an OpenStack-based tenant hosted within the Cloud@CNAF. This tenant, provided with 300 vCPUs, is mostly devoted to support peculiar use cases which require unusual software configurations and only for a limited amount of time. The most important of these use cases is the FAZIA experiment, for which 256 vCPUs were provided, distributed over 16 worker nodes with 8GB of RAM each, where the Debian 8.4 operating system has been installed and configured with LDAP and Kerberos for user authentication and authorization, and NFS 4 for network storage sharing. +Recently, other experiments started accessing the Cloud infrastructure: AMS, EEE, Icarus and NTOF. \end{document} diff --git a/contributions/virgo/AdV_computing_CNAF.tex b/contributions/virgo/AdV_computing_CNAF.tex index dc1f70b4b8c7f806cf12a11833a4e16700195964..bd175755ad35f37567b57a47b69d2a2fdb233ce4 100644 --- a/contributions/virgo/AdV_computing_CNAF.tex +++ b/contributions/virgo/AdV_computing_CNAF.tex @@ -5,18 +5,18 @@ %\author{P. Astone$^1$, F. Badaracco$^{2,3}$, S. Bagnasco$^4$, S. Caudill$^5$, F. Carbognani$^6$, A. Cirone$^{7,8}$, G. Fronz\'e$^{4}$, J. Harms$^{2,3}$, I. LaRosa$^1$, C. Lazzaro$^9$, P. Leaci$^1$, S. Lusso$^4$, C. Palomba$^1$, R. DePietri$^{11,12}$, M. Punturo$^{10}$, L. Rei$^8$, L. 
Salconi$^6$, S. Vallero$^{4}$, on behalf of the Virgo collaboration} \author{P. Astone$^1$, F. Badaracco$^{2,3}$, S. Bagnasco$^4$, S. Caudill$^5$, F. Carbognani$^6$, A. Cirone$^{7,8}$, M. Drago$^{2,3}$, G. Fronz\'e$^{4}$, J. Harms$^{2,3}$, I. LaRosa$^1$, C. Lazzaro$^9$, P. Leaci$^1$, S. Lusso$^4$, C. Palomba$^1$, R. DePietri$^{11,12}$, M. Punturo$^{10}$, L. Rei$^8$, L. Salconi$^6$, S. Vallero$^{4}$, on behalf of the Virgo collaboration} -\address{$^1$ INFN, Roma, IT} -\address{$^2$ Gran Sasso Science Institute (GSSI), IT} -\address{$^3$ INFN, Laboratori Nazionali del Gran Sasso, IT} -\address{$^4$ INFN, Torino, IT} -\address{$^5$ Nikhef, Science Park, NL} -\address{$^6$ EGO-European Gravitational Observatory, Cascina, Pisa, IT} -\address{$^7$ Universit\`a degli Studi di Genova, IT} -\address{$^8$ INFN, Genova, IT} -\address{$^9$ INFN, Padova, IT} -\address{$^{10}$ INFN, Perugia, IT} -\address{$^{11}$ Universit\`a degli Studi di Parma, IT} -\address{$^{12}$ INFN, Gruppo Collegato Parma, IT} +\address{$^1$ INFN Sezione di Roma, Roma, IT} +\address{$^2$ Gran Sasso Science Institute (GSSI), L'Aquila, IT} +\address{$^3$ INFN Laboratori Nazionali del Gran Sasso, L'Aquila, IT} +\address{$^4$ INFN Sezione di Torino, Torino, IT} +\address{$^5$ Nikhef, Amsterdam, NL} +\address{$^6$ EGO-European Gravitational Observatory, Cascina (PI), IT} +\address{$^7$ Universit\`a degli Studi di Genova, Genova, IT} +\address{$^8$ INFN Sezione di Genova, Genova, IT} +\address{$^9$ INFN Sezione di Padova, Padova, IT} +\address{$^{10}$ INFN Sezione di Perugia, Perugia, IT} +\address{$^{11}$ Universit\`a degli Studi di Parma, Parma, IT} +\address{$^{12}$ INFN Gruppo Collegato Parma, Parma, IT} %\address{Production Editor, \jpcs, \iopp, Dirac House, Temple Back, Bristol BS1~6BE, UK} @@ -32,7 +32,7 @@ The amount of data processed during the last few years has emphasized the fact t \section{Advanced Virgo computing model} \subsection{Data production and data transfer} -The Advanced Virgo data 
acquisition system is writing about 35MB/s of data (so-called ``bulk data'') during O3. CNAF and CC-IN2P3 are the Virgo Tier-0: during the science runs, bulk data is stored in a circular buffer located at the Virgo site, and simultaneously transferred to the remote computing centres where they are archived in tape libraries. The transfer is realized through an ad-hoc procedure based on GridFTP (at CNAF) and iRods (at CC-IN2P3). Other data fluxes reach CNAF during science runs: +The Advanced Virgo data acquisition system is writing about 35MB/s of data (so-called ``bulk data'') during O3. CNAF and CC-IN2P3 are the Virgo Tier 0: during the science runs, bulk data is stored in a circular buffer located at the Virgo site, and simultaneously transferred to the remote computing centers where they are archived in tape libraries. The transfer is realized through an ad-hoc procedure based on GridFTP (at CNAF) and iRods (at CC-IN2P3). Other data fluxes reach CNAF during science runs: \begin{itemize} \item trend data (few GB/day), periodically transferred using the system described above; diff --git a/contributions/xenon/main.tex b/contributions/xenon/main.tex index 477de5de545a20bcd43a177961b6ea432bb9f20b..600fd7e47b9cdebe5ab0fb820b7350ff5973dac1 100644 --- a/contributions/xenon/main.tex +++ b/contributions/xenon/main.tex @@ -20,9 +20,9 @@ \title{XENON computing model} %\pagestyle{fancy} -\author{M. Selvi} +\author{Marco Selvi$^1$} -\address{INFN - Sezione di Bologna} +\address{$^1$ INFN Sezione di Bologna, Bologna, IT} \ead{marco.selvi@bo.infn.it}