From bf2c4d7625e2738418d9f0584afedf019f3031e0 Mon Sep 17 00:00:00 2001 From: Dmytro Bogatov <dmytro@dbogatov.org> Date: Mon, 4 Dec 2017 21:12:19 -0500 Subject: [PATCH] Final fixes. --- .gitlab-ci.yml | 2 +- .vscode/tasks.json | 4 +- build.sh | 12 ++- cli.tex | 17 +++-- definitions.tex | 4 + main.tex | 2 +- packages.tex | 1 + sections/appendix.tex | 71 +++++++++-------- sections/applications-and-extensions.tex | 12 +-- sections/bounds-on-stash-usage.tex | 6 +- sections/conclusion.tex | 2 +- sections/evaluation.tex | 10 +-- sections/example.tex | 15 ++-- sections/oblivious-memory.tex | 12 +-- sections/overview-of-other-orams.tex | 4 +- sections/path-oram-protocol.tex | 89 +++++++++++++++------- sections/problem-definition.tex | 8 +- sections/recursion-and-parametrization.tex | 2 + settings.tex | 28 +------ 19 files changed, 165 insertions(+), 136 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4f07687..e9a8349 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ artifacts: script: - printf "\providecommand{\\\version}{%s}" $(echo $CI_BUILD_REF | cut -c1-8) > version.tex - ./build.sh - - ./build.sh -n -j presentation-with-notes + - ./build.sh -n on -j presentation-with-notes - ./build.sh -b - mv dist/*.pdf . artifacts: diff --git a/.vscode/tasks.json b/.vscode/tasks.json index bad535a..b1fb334 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -19,7 +19,7 @@ { "label": "Compile once notes", "type": "shell", - "command": "./build.sh -g -i 1 -n", + "command": "./build.sh -g -i 1 -n on", "group": { "kind": "build", "isDefault": true @@ -35,7 +35,7 @@ { "label": "Compile multiple notes", "type": "shell", - "command": "./build.sh -g -n", + "command": "./build.sh -g -n on", "group": "build", "presentation": { "echo": true, diff --git a/build.sh b/build.sh index 1449cc7..5db7ecd 100755 --- a/build.sh +++ b/build.sh @@ -8,10 +8,10 @@ INTERACTION=batchmode OUTDIR=dist JOBNAME=presentation ITERATIONS=3 +NOTES="off" LATEX_DEBUG="" -LATEX_NOTES="" -usage() { echo "Usage: $0 [-i <number> -g -v -t -n -b -j <string>]" 1>&2; exit 1; } +usage() { echo "Usage: $0 [-i <number> -g -v -t -n <string: on|only|off> -b -j <string>]" 1>&2; exit 1; } log() { if [ -n "${VERBOSE}" ]; then @@ -70,7 +70,7 @@ bibliography () { echo "Done." } -while getopts "c:j:i:vgtnb" o; do +while getopts "c:j:i:n:vgtb" o; do case "${o}" in c) COMPILER=${OPTARG} @@ -90,7 +90,7 @@ while getopts "c:j:i:vgtnb" o; do JOBNAME=${OPTARG} ;; n) - NOTES=true + NOTES=${OPTARG} ;; i) ITERATIONS=${OPTARG} @@ -119,9 +119,7 @@ if [ -n "${DEBUG}" ]; then LATEX_DEBUG="\def\debug{true}" fi -if [ -n "${NOTES}" ]; then - LATEX_NOTES="\def\generatenotes{true}" -fi +LATEX_NOTES="\def\generatenotes{$NOTES}" echo "Compiling the project into ${JOBNAME}.pdf ..." 
diff --git a/cli.tex b/cli.tex index ac492ae..ebee217 100644 --- a/cli.tex +++ b/cli.tex @@ -10,11 +10,12 @@ \releasetrue% \fi - -\newif\ifnotes% - -\ifdefined\generatenotes% - \notestrue% -\else - \notesfalse% -\fi +\ifnum\pdfstrcmp{\generatenotes}{on}=0% + \newcommand{\notesOption}{show notes on second screen}% +\else% + \ifnum\pdfstrcmp{\generatenotes}{only}=0% + \newcommand{\notesOption}{show only notes}% + \else% + \newcommand{\notesOption}{hide notes}% + \fi% +\fi% diff --git a/definitions.tex b/definitions.tex index b4a9b89..1f78ddc 100644 --- a/definitions.tex +++ b/definitions.tex @@ -27,3 +27,7 @@ ypos=-25 ]{\usebox\watermark} \fi + +\makeatletter + \newcommand{\manuallabel}[2]{\def\@currentlabel{#2}\label{#1}} % chktex 21 +\makeatother diff --git a/main.tex b/main.tex index 58285cf..6d557e4 100644 --- a/main.tex +++ b/main.tex @@ -35,7 +35,7 @@ Start with the problem statement. What an ORAM is and why we need it. \item - Talk about what ORAMs are already there and what they disadvantages are. + Talk about what ORAMs are already there and what their disadvantages are. \item Then go in the same order as paper goes. \end{itemize} diff --git a/packages.tex b/packages.tex index 98caba0..378463e 100644 --- a/packages.tex +++ b/packages.tex @@ -25,3 +25,4 @@ \usepackage[printwatermark]{xwatermark} \usepackage{fontawesome} +\usepackage{ifthen} diff --git a/sections/appendix.tex b/sections/appendix.tex index 003e3cc..55b00fd 100644 --- a/sections/appendix.tex +++ b/sections/appendix.tex @@ -1,60 +1,71 @@ -% cSpell:ignore DBLP +% cSpell:ignore DBLP manuallabel \begin{frame}{Max stash size grows linearly with $\lambda$} \begin{center} - \includegraphics[ - height=5cm, - keepaspectratio - ]{static/images/max-stash-size-linear.png} - + \begin{figure}\manuallabel{fig:max-stash-size-linear}{figure} + + \includegraphics[ + height=5cm, + keepaspectratio + ]{static/images/max-stash-size-linear.png} + + \end{figure} + \end{center} Figure 3 from~\cite{DBLP:journals/corr/abs-1202-5150}. \end{frame} -\begin{frame}{Max stash size does not depend on $N$} +\begin{frame}{Max stash size for large security parameters} \begin{center} - \includegraphics[ - height=5cm, - keepaspectratio - ]{static/images/stash-size-does-not-depend-on-n.png} + \begin{figure}\manuallabel{tbl:stash-size-for-sec-parameters}{table} - \end{center} + \begin{tabular}{ l c c c } - Figure 4 from~\cite{DBLP:journals/corr/abs-1202-5150}. + \toprule% -\end{frame} + \multirow{3}{*}{\textbf{Security parameter} $\lambda$} & \multicolumn{3}{c}{\textbf{Bucket size} $Z$} \\ + & 4 & 5 & 6 \\ + & \multicolumn{3}{c}{\textbf{Max stash size}} \\ -\begin{frame}{Max stash size for large security parameters} - - \begin{center} + \midrule% + + 80 & 89 & 63 & 53 \\ + 128 & 147 & 105 & 89 \\ + 256 & 303 & 218 & 186 \\ + + \bottomrule% - \begin{tabular}{ l c c c } + \end{tabular} - \toprule% + \end{figure} - \multirow{3}{*}{\textbf{Security parameter} $\lambda$} & \multicolumn{3}{c}{\textbf{Bucket size} $Z$} \\ - & 4 & 5 & 6 \\ - & \multicolumn{3}{c}{\textbf{Max stash size}} \\ + \end{center} - \midrule% + Figure 5 from~\cite{DBLP:journals/corr/abs-1202-5150}. 
- 80 & 89 & 63 & 53 \\ - 128 & 147 & 105 & 89 \\ - 256 & 303 & 218 & 186 \\ +\end{frame} - \bottomrule% +\begin{frame}{Max stash size does not depend on $N$} + + \begin{center} - \end{tabular} + \begin{figure}\manuallabel{fig:stash-size-does-not-depend-on-n}{figure} + + \includegraphics[ + height=5cm, + keepaspectratio + ]{static/images/stash-size-does-not-depend-on-n.png} + + \end{figure} \end{center} - Figure 5 from~\cite{DBLP:journals/corr/abs-1202-5150}. + Figure 4 from~\cite{DBLP:journals/corr/abs-1202-5150}. \end{frame} - diff --git a/sections/applications-and-extensions.tex b/sections/applications-and-extensions.tex index cc37c89..8f34201 100644 --- a/sections/applications-and-extensions.tex +++ b/sections/applications-and-extensions.tex @@ -9,6 +9,8 @@ PathORAM can be used to perform search on an oblivious binary search tree, using $\BigO{\log^2 N}$ bandwidth.~\cite{Gentry:2013} \note{ + \textcite{Gentry:2013} suggested that ORAMs can be used to perform search on an oblivious binary search tree. + Underlying data structure for PathORAM is an oblivious binary tree. One \textsc{Access} for the ORAM is equivalent to binary tree search. This way, without re-randomization and write back subroutine, PathORAM \textsc{Access} is the same as binary search. @@ -22,7 +24,7 @@ \note{ If we are using recursive PathORAM, we can upload and download client state --- which is $\BigO{\log N} \cdot \omega (1)$ --- before each access. - This ways we can build a \emph{stateless} ORAM --- potentially, multi-user ORAM\@. + This way we can build a \emph{stateless} ORAM --- potentially, a multi-user ORAM\@. } \end{frame} @@ -36,8 +38,8 @@ \note{ Due to its simplicity, PathORAM is particularly good for silicon implementations. - For example, Martin Mass has build such implementation using FPGAs. - Fletcher and Ren built a simulator for a processor based on PathORAM\@. + For example, \textcite{Maas:EECS-2014-89} has built such an implementation using FPGAs. % chktex 8 + \textcite{Fletcher:2012:SPA:2382536.2382540} and \textcite{ren2013design} built a simulator for a processor based on PathORAM\@. } \end{frame} @@ -52,7 +54,7 @@ \] \note{ - It is possible to treat PAthORAM internal tree structure as a Merkle tree. - Each node is tagged with the hash of the following form, which is a concatenation of hashes of all blocks in the bucket, and left and right child of the node. + It is possible to treat the PathORAM internal tree structure as a Merkle tree. + Each node is tagged with the hash of the following form, which is a concatenation of hashes of all blocks in the bucket, and the children of the node. } \end{frame} diff --git a/sections/bounds-on-stash-usage.tex b/sections/bounds-on-stash-usage.tex index d75668d..5e2c4a0 100644 --- a/sections/bounds-on-stash-usage.tex +++ b/sections/bounds-on-stash-usage.tex @@ -8,7 +8,7 @@ For a bucket size $Z = 5$, tree height $L = \ceil{\log N}$ and stash size $R$, the probability of a PathORAM failure after a sequence of load/store operations corresponding to $\bm{a}$, is at most \[ - \Pr \left[ \text{st} \left( \text{ORAM}_L^5 [ \bm{s} ] \right) > R | a( \bm{s} ) = \bm{a} \right] \le 14 \cdot (0.6002)^R + \Pr \left[ \text{st} \left( \text{ORAM}_L^5 [ \bm{s} ] \right) > R \; | \; a( \bm{s} ) = \bm{a} \right] \le 14 \cdot (0.6002)^R \] where the probability is over the randomness that determines $\bm{x}$ and $\bm{y}$ in $\bm{s = (a, x, y)}$. @@ -17,7 +17,7 @@ \note{ The whole proof of negligible failure probability is about proving this theorem.
- It might look complex, but simply put, what is says is that for any ORAM with 5 blocks per bucket, stash usage exceeds some stash size $R$ with probability at most exponentially small with respect ot $R$. + It might look complex, but simply put, what it says is that for any ORAM with 5 blocks per bucket, stash usage exceeds some stash size $R$ with probability at most exponentially small with respect to $R$. Or, even simpler, the probability of exceeding stash capacity decreases exponentially with the stash size, given that the bucket size $Z$ is large enough. @@ -45,6 +45,6 @@ Second, they show that the stash usage after post-processing is $> R$ if and only if there exists a subtree $T$ for which its ``usage'' in $\infty$-ORAM is more than its ``capacity''. - Finally, they show how a mixture of a binomial and a geometric probability distribution expresses the probability of the number of real blocks that do not get evicted from a subtree after a sequence of load/store operations. + Finally, they show how a mixture of binomial and geometric probability distributions expresses the probability of the number of real blocks that do not get evicted from a subtree after a sequence of load/store operations. } \end{frame} diff --git a/sections/conclusion.tex b/sections/conclusion.tex index 3a2787a..192789a 100644 --- a/sections/conclusion.tex +++ b/sections/conclusion.tex @@ -14,7 +14,7 @@ \note{ The conclusion given in the paper is very concise, so I have just copied it down from the paper. - To iterate, pathORAM is simple, practical, requires small client storage. + To reiterate, PathORAM is simple, practical and requires small client storage. The paper proves asymptotic bounds and negligible probability of failure. Lastly, the practical evaluation of theoretical results is given. } diff --git a/sections/evaluation.tex b/sections/evaluation.tex index 8def544..413b1c5 100644 --- a/sections/evaluation.tex +++ b/sections/evaluation.tex @@ -23,7 +23,7 @@ \note{ In the experiments, authors used a binary tree of height $\ceil*{\log_2 N} - 1$. - Let us define \emph{stash occupancy} is the number of overflowing blocks. + Let us define \emph{stash occupancy} as the number of overflowing blocks. Thus, this would be a client's persistent local storage in addition to $Z \log_2 N$ transient storage for storing single path. We access PathORAM in a round-robin fashion --- it is proven to be the worst-case scenario for PathORAM\@. @@ -42,16 +42,16 @@ \item Required stash size grows linearly with the security parameter. Failure probability decreases exponentially with the stash size. - See appendix. + See~\ref{fig:max-stash-size-linear} in the appendix. \item Extrapolate those results for realistic values of $\lambda$. - See appendix. + See~\ref{tbl:stash-size-for-sec-parameters} in the appendix. \item It is by definition infeasible to simulate for practically adopted security parameters (e.g., $\lambda = 128$) \item Required stash size for a low failure probability does not depend on $N$. This shows PathORAM has good scalability. - See appendix. + See~\ref{fig:stash-size-does-not-depend-on-n} in the appendix. \end{itemize} \end{block} @@ -92,7 +92,7 @@ \note{ Another set of observations about bucket load. - For $Z \in \{ 4, 5 \}$, the usage of a subtree is close to the number of buckets in it. + For $Z \ge 4$, the usage of a subtree is close to the number of buckets in it. This means, we do not waste space with dummy blocks. For the levels close to the root, the bucket load is 1 block.
diff --git a/sections/example.tex b/sections/example.tex index 00fad7a..be54d93 100644 --- a/sections/example.tex +++ b/sections/example.tex @@ -11,7 +11,7 @@ {\drawMap{Initial position map}} { Here we see the initial state before we access an element. - Buckets are filled greedily from bottom to the top. + Buckets are filled greedily from the bottom to the top. Leaves are numbered left to right. Each number in the buckets corresponds to encrypted data block and each empty set symbol corresponds to dummy data block. Position table shows the mapping from data blocks to leaves. @@ -58,6 +58,7 @@ \end{block} } { + \vspace{10pt} \begin{block}{Remap} Assign random leaf @@ -69,16 +70,16 @@ \end{block} } { - Now all those data blocks appear in stash. - We now for a fact that our block of interest is there as well. + Now, all those data blocks appear in the stash. + We know for a fact that our block of interest is there as well. We extract it to data variable. We change block's data and re-encrypt all data blocks. We then put it back to stash. It is important that we also re-map the block. - Let say we assign it new leaf --- number 3. + Let us say we assign it a new leaf --- number 3. - Now we need to write path back. + Now we need to write the path back. } \exampleFrame% @@ -101,7 +102,7 @@ } { We start filling buckets with data blocks greedily from leaves. - We pick those $Z = 3$ blocks from stash which can be placed on the level in path not breaking invariant. + We pick those $Z = 3$ blocks from the stash which can be placed on the level in the path without breaking the invariant. If fewer than $Z$ blocks can be placed, we pad it with dummy blocks. } @@ -195,7 +196,7 @@ Please note that the adversary only sees that we have read whole path and written whole path. She is not able to see which data block was modified because we re-encrypted everything. - Keep in mind that it is possible that stash is not empty at the end of operation. + Keep in mind that it is possible that the stash is not empty at the end of an operation. It is unlikely to happen. The stash is emptied during the next accesses. } diff --git a/sections/oblivious-memory.tex b/sections/oblivious-memory.tex index ace2f00..4dcf867 100644 --- a/sections/oblivious-memory.tex +++ b/sections/oblivious-memory.tex @@ -30,9 +30,9 @@ An adversary can see the access pattern. It can see which records are accessed more often. Which records are accessed only after some other records were touched. - How ofter read vs write operations occur. + How often read vs write operations occur. - \emph{What are the examples of attacks when access pattern is leaked?} + Talk about the~\cite{Dautrich:2013:CPP:2452376.2452397} paper. } \end{frame} @@ -44,7 +44,7 @@ \note{ A solution is to design an oblivious memory access system. - This definition of oblivious machine is cited from the original paper on ORAMs by Goldreich and others from May 1996 --- around my birthday. + This definition of oblivious machine is cited from the original paper on ORAMs by \textcite{Goldreich:1996:SPS:233551.233553} from May 1996 --- around my birthday. Among the other things the paper states a number of theorems on computational bounds of generic ORAMs. Although the paper analyzes generic ORAMs, 20 years ago people were more concerned about CPU working with RAM access patterns. @@ -74,7 +74,9 @@ \note{ This are some theorems stated and proved in the paper. I am not going to do proofs here. - The idea is that these are theoretical bounds for generic ORAMs.
- Designing our owm ORAM the aim is to come as close as possible to lower bounds. + The idea is that these are the theoretical bounds for generic ORAMs. + When designing our own ORAM, the aim is to come as close as possible to the lower bounds. + + In fact, PathORAM does hit the lower bound --- $\log m$ steps per access. } \end{frame} diff --git a/sections/overview-of-other-orams.tex b/sections/overview-of-other-orams.tex index 115b0ea..1d47e9d 100644 --- a/sections/overview-of-other-orams.tex +++ b/sections/overview-of-other-orams.tex @@ -37,7 +37,7 @@ Worst-case scenarios shown. \note{ - Chang and others published a great paper a year ago doing accurate comparison of known ORAM systems. + \textcite{Chang:2016:ORD:2994509.2994528} published a great paper a year ago doing an accurate comparison of known ORAM systems. They analyzed space and time complexity of the systems. The result is on the table. @@ -45,7 +45,7 @@ Server and client respectively show how much space is used by an ORAM on the server and on the client. Computational overhead is a composite of communication, encryption/decryption and client running overheads \emph{per access}. - I am not going to elaborate on all ORAMs, but it is clear that one of them wins in every category. + I am not going to elaborate on all ORAMs, but it is clear that one of them wins in almost every category. This is why we have chosen it for our secure cloud. } \end{frame} diff --git a/sections/path-oram-protocol.tex b/sections/path-oram-protocol.tex index 3381e24..4e2b5c8 100644 --- a/sections/path-oram-protocol.tex +++ b/sections/path-oram-protocol.tex @@ -8,25 +8,53 @@ \input{diagrams/protocol} - \note{ - TODO + \note<1> { + Here is the diagram of the PathORAM protocol. + There are two components in the ORAM model and a user interacting with the system. + + The server basically holds a binary tree of buckets with encrypted blocks. + + The client holds two data structures --- the position map and the stash --- we will talk about them in a moment. + } + + \note<2>{ + First, the user makes a request to the (trusted) client part. + The user requests --- to read or to write --- a block with a certain identifier. } + + \note<3,4>{ + If the client does not have the block in the stash, it makes a request to the server and reads a path with encrypted blocks. + + The server responds with a path of buckets of encrypted blocks. + } + + \note<5>{ + The client manipulates these blocks --- re-encrypts them, shuffles them and writes the path back. + We will talk about how the client writes the path back in a randomized way. + } + + \note<6>{ + Finally, the client returns the data to the user. + } + \end{frame} \begin{frame}{Main invariant} The client stores a small amount of local data in a \textbf{stash}. - The server-side storage is treated as a \textbf{binary tree} where each node is a \textbf{bucket} that can hold up to a fixed number of \textbf{blocks}. + The server-side storage is treated as a \textbf{binary tree} where each node is a \textbf{bucket} that always holds a fixed number of \textbf{blocks}. \begin{block}{Invariant} At any time, each block is mapped to a uniformly random leaf bucket in the tree, and unstashed blocks are always placed in some bucket along the path to the mapped leaf. \end{block} \note{ - An underlying data structure for PAth ORAM is a binary tree. + An underlying data structure for PathORAM is a binary tree. Client has a position table where each data block ID is mapped to the leaf node. Each time access occurs, whole path from the leaf to the root gets read and written.
This ensures indistinguishability. + + The goal of the invariant is to keep the position map accurate at all times. } \end{frame} @@ -36,23 +64,23 @@ \begin{block}{Binary tree} The server stores a binary tree data structure of height $L$ and $2^L$ leaves. - We then need $L = \ceil*{ \log_2 N }$. + We then need $L = \ceil*{ \log_2 N }$ levels. The levels of the tree are numbered $0$ to $L$ where level $0$ denotes the root of the tree and level $L$ denotes the leaves. \end{block} \note{ Let us define $L$ --- the height of our tree. Then reasonably it will be equal to $\log_2 N$ rounded up. - Let us also define 0 level as root and $L$th level as leaves. + Let us also define level 0 as the root and the $L_\text{th}$ level as the leaves. } \end{frame} \begin{frame}{\subsecname} \begin{block}{Bucket} - Each node in the tree is called a bucket. + Each node in the tree is called a \textbf{bucket}. Each bucket can contain up to $Z$ real blocks. - If a bucket has less than $Z$ real blocks, it is padded with dummy blocks to always be of size $Z$. + If a bucket has fewer than $Z$ real blocks, it is padded with dummy blocks to always be of size $Z$. \end{block} \note{ @@ -67,13 +95,13 @@ \begin{block}{Path} Let $x \in \{ 0, 1, \ldots, 2^L - 1 \}$ denote the $x_{\text{th}}$ leaf node in the tree. - Any leaf node $x$ defines a unique path from leaf $x$ to the root of the tree. - We use $\mathcal{P}(x)$ to denote set of buckets along the path from leaf $x$ to the root. + Any leaf node $x$ defines a unique path from the root of the tree to the leaf $x$. + We use $\mathcal{P}(x)$ to denote the set of buckets along the path from leaf $x$ to the root. Additionally, $\mathcal{P}(x,l)$ denotes the bucket in $\mathcal{P}(x)$ at level $l$ in the tree. \end{block} \note{ - Let us define a path from leaf $x$ to root as $\mathcal{P}(x)$. + Let us define a path from leaf $x$ to the root as $\mathcal{P}(x)$. Let us also define $\mathcal{P}(x,l)$ as the bucket along the path at level $l$. } \end{frame} @@ -82,12 +110,12 @@ \begin{block}{Server storage size} Total server storage used is about $Z \cdot N$ blocks. - Since $Z$ is s small constant, server storage is $\BigO{N}$. + Since $Z$ is a small constant, server storage is $\BigO{N}$. \end{block} \note{ Let us make an important observation. - The total server storage used is the order of $N$ since $Z$ is s small constant. + The total server storage used is the order of $N$ since $Z$ is a small constant. } \end{frame} @@ -96,7 +124,7 @@ \begin{frame}{\subsecname} \begin{block}{Stash} - The client locally stores overflowing blocks in a local data structure $S$ called the stash. + The client locally stores overflowing blocks in a local data structure $S$ called the \textbf{stash}. The stash has a worst-case size of $\BigO{\log N} \cdot \omega (1)$ blocks with high probability. The stash is usually empty after each ORAM read/write operation completes. \end{block} \note{ The core component of the client is stash. It is a local data structure that stores overflowing blocks. - PathORAM protocol is a randomized, and the error is the event that this stash overflows. - The analysis, though, shows that this happens with negligible probability. + The PathORAM protocol is randomized, and the error is the event that this stash overflows. + The analysis, though, shows that for a stash of size $\BigO{\log N} \cdot \omega (1)$ this happens with negligible probability. } \end{frame} @@ -118,21 +146,21 @@ \note{ Another core structure of the client is the position table.
- It is a simple lookup table that maps the block identifier with identifier $a$ to some leaf $x$. - The bucket does not necessarily live in the leaf, but it is guaranteed to leave somewhere along the path or in the stash. - The key to security is the re-randomization of this table each access. + It is a simple lookup table that maps the block with identifier $a$ to some leaf $x$. + The block does not necessarily live in the leaf bucket, but it is guaranteed to live somewhere along the path or in the stash. + The key to security is the re-randomization of this table on each access. } \end{frame} \begin{frame}{\subsecname} \begin{block}{Bandwidth} - For each load or store operation, the client reads a path of $Z \log N$ blocks from the server and then writes them back, resulting in a total of $2Z \log N$ blocks bandwidth used per access. + For each load or store operation, the client reads a path of $Z \log N$ blocks from the server and then writes it back, resulting in a total of $2Z \log N$ blocks bandwidth used per access. Since $Z$ is a constant, the bandwidth usage is $\BigO{\log N}$ blocks. \end{block} \note{ - Each read and write, client reads whole path and writes it back. + On each read and write, the client reads the whole path and writes it back. A path is $Z \log N$ blocks, and since $Z$ is a small constant, resulting usage is $\BigO{\log N}$ in the number of blocks. } \end{frame} @@ -140,14 +168,15 @@ \begin{frame}{\subsecname} \begin{block}{Client storage size} - The position map is of size $N L = N \log N$ bits, which is of size $\BigO{N}$ blocks when the block size $\BigOmega{\log N}$. + The position map is of size $N L = N \log N$ bits, which is of size $\BigO{N}$ blocks when the block size is $\BigOmega{\log N}$ bits. + The stash is at most $\BigO{\log N} \cdot \omega (1)$ blocks to obtain negligible failure probability. The recursive construction can achieve client storage of $\BigO{\log N} \cdot \omega (1)$. \end{block} \note{ - Position map is the order of $N$ and the stash is order of $\log N$ for negligible error probability. - So, for the basic PAthORAM the client storage usage is order fo $N$. + The position map is the order of $N$ and the stash is the order of $\log N$ for negligible error probability. + So, for the basic PathORAM the client storage usage is of the order of $N$. However, it is possible to use recursive version of the ORAM, which I will talk about later, to lower the usage to the order of $\log N$. } \end{frame} @@ -168,8 +197,10 @@ \note{ The client stash $S$ is initially empty. - The server buckets are initialized to contain random encryptions of the dummy block. + The server buckets are initialized to contain randomized encryptions of the dummy blocks. The client's position map is filled with independent random numbers between 0 and $2^L - 1$. + + When an access occurs, the requested block is remapped. } \end{frame} @@ -203,7 +234,7 @@ ]{listings/algorithm.tex} \note{ - If the access is a write, update the data stored for block $a$. + If the access is a \texttt{write}, update the data stored for block $a$. } \end{frame} @@ -266,7 +297,7 @@ $\textsc{WriteBucket}(bucket, blocks)$ writes blocks in the bucket on the server. It pads blocks to make it $Z$ blocks total. - Remember that all bucket should be filled to make them indistinguishable to adversary. + Remember that all buckets should be filled to make them indistinguishable to the adversary. Blocks are encrypted as they are written using randomized scheme --- every cipher text is different for the same plaintext.
} \end{frame} @@ -329,8 +360,8 @@ \note{ Our definition of security requires that the access pattern is indistinguishable from random. - Encrypted paths look random enough by the definition of security --- and we use randomized encryption since deterministic one does not even satisfy chosen plaintext attack security. - The access pattern is random enough since the probability of a particular access is $\frac{1}{2^L}$ --- uniform in the number of blocks because each access the position table is re-randomized. + Encrypted paths look random enough by the definition of secure encryption --- and we use randomized encryption since a deterministic one does not even satisfy chosen plaintext attack security. + The access pattern is random enough since the probability of a particular sequence is $\frac{1}{2^L}$ --- uniform in the number of blocks because on each access the position table is re-randomized. For $M$ access, the probability is even smaller by the Bayes rule. } \end{frame} diff --git a/sections/problem-definition.tex b/sections/problem-definition.tex index f5f1ea5..fbfff05 100644 --- a/sections/problem-definition.tex +++ b/sections/problem-definition.tex @@ -26,9 +26,11 @@ Can then be implemented in hardware~\cite{Maas:EECS-2014-89}. \note{ - Another goal is simplicity. + One of the goals is simplicity. It not only allows us to analyze system easily. In case of Path ORAM, it is so simple that it has been implemented in pure hardware. + + One of the works that puts PathORAM on silicon is written by \textcite{Maas:EECS-2014-89}. % chktex 8 } \end{frame} @@ -48,8 +50,8 @@ \note{ Let us define what we mean by access pattern. - We mean a sequence of operation --- writes and reads --- one some blocks with identifiers $\text{a}_i$ reading or writing some $\text{data}_i$. - Say we have $M$ operation in sequence. + We mean a sequence of operations --- writes and reads --- on some blocks with identifiers $\text{a}_i$ reading or writing some $\text{data}_i$. + Say we have $M$ operations in sequence. } \end{frame} diff --git a/sections/recursion-and-parametrization.tex b/sections/recursion-and-parametrization.tex index 12a62e2..110cbd8 100644 --- a/sections/recursion-and-parametrization.tex +++ b/sections/recursion-and-parametrization.tex @@ -12,6 +12,8 @@ One of the ways to make the client even thinner is to use recursion. Zero-level ORAM contains data blocks, the position map of $i_\text{th}$ ORAM is stored in the ${(i+1)}_\text{st}$ ORAM, and the client stores the position map for the last ORAM\@. The access to a block in a zero-level ORAM triggers recursive calls all up to the last ORAM\@. + + The idea of recursion was first described in the works of \textcite{Shi:2011} and \textcite{DBLP:journals/corr/abs-1106-3652}.
% chktex 8 } \end{frame} diff --git a/settings.tex b/settings.tex index 7c17e51..006b0da 100644 --- a/settings.tex +++ b/settings.tex @@ -2,9 +2,7 @@ \newfontfamily{\FA}[Path = fonts/]{FontAwesome.otf} -\ifnotes% - \setbeameroption{show notes on second screen} -\fi +\setbeameroption{\notesOption} \title{Data-X Talk} % chktex 13 @@ -89,27 +87,3 @@ fit, calc } -\tikzset{ - dia/.style={ - shape=diamond, - minimum size=2em, - }, - dia cross/.style={ - dia, -% path picture={ -% \draw (path picture bounding box.west) -- (path picture bounding box.east) -% (path picture bounding box.north) -- (path picture bounding box.south); -% }, - append after command={ - \pgfextra% - \draw[shorten >=\pgflinewidth,shorten <=\pgflinewidth] - (\tikzlastnode.west) -- (\tikzlastnode.east); % chktex 8 - \draw[shorten >=\pgflinewidth,shorten <=\pgflinewidth] - (\tikzlastnode.north) -- (\tikzlastnode.south); % chktex 8 - \endpgfextra% - } - - } -} - - -- GitLab
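For readers following the protocol notes added in sections/path-oram-protocol.tex (remap, read path, update stash, greedy write-back), here is a minimal self-contained Python sketch of the Access procedure those slides describe. It is illustrative only and is not the pseudo-code shipped in listings/algorithm.tex: the parameters (L = 4, Z = 4) and every name in it (tree, position, stash, bucket_on_path, access) are invented for this sketch, and encryption and dummy padding are reduced to comments.

import random

# Illustrative parameters (not the repository's): a tree of height L = 4, so 16 leaves, and Z = 4 slots per bucket.
L = 4
Z = 4
N = 2 ** L

tree = {}      # server storage: bucket index -> list of (block_id, data) pairs; absent buckets are all-dummy
position = {a: random.randrange(2 ** L) for a in range(N)}   # client position map: block id -> leaf
stash = {}     # client stash: block id -> data

def bucket_on_path(x, l):
    """Index of the level-l bucket on the path from the root (level 0) to leaf x (level L)."""
    return (2 ** l - 1) + (x >> (L - l))

def access(op, a, new_data=None):
    # 1. Remap: remember the leaf currently assigned to block a, then draw a fresh uniformly random leaf.
    x = position[a]
    position[a] = random.randrange(2 ** L)

    # 2. Read the whole path P(x) from the server into the stash.
    for l in range(L + 1):
        for block_id, block_data in tree.get(bucket_on_path(x, l), []):
            stash[block_id] = block_data

    # 3. Serve the request from the stash.
    data = stash.get(a)
    if op == "write":
        stash[a] = new_data

    # 4. Write the path back greedily from the leaf up: a stashed block may go into bucket P(x, l)
    #    only if that bucket also lies on the path to the block's own (current) leaf.
    for l in range(L, -1, -1):
        fits = [b for b in stash if bucket_on_path(position[b], l) == bucket_on_path(x, l)]
        chosen = fits[:Z]
        tree[bucket_on_path(x, l)] = [(b, stash.pop(b)) for b in chosen]
        # A real server bucket would be padded here with freshly (re-)encrypted dummy blocks up to Z entries.

    return data

# Example: write block 3, then read it back.
access("write", 3, "hello")
assert access("read", 3) == "hello"

Running the sketch performs a write followed by a read of block 3 and checks that the value round-trips; the placement rule in step 4 is exactly the condition stated on the "Main invariant" frame --- a block may only be stored in a bucket that lies on the path to its currently mapped leaf, otherwise it stays in the stash.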