@string{brics =	"{BRICS}"}
@string{daimi =	"Department of Computer Science, University of Aarhus"}
@string{iesd  =	"Department of Mathematics and Computer Science, Aalborg University"}
@string{rs    =	"Research Series"}
@string{ns    =	"Notes Series"}
@string{ls    =	"Lecture Series"}
@string{ds    =	"Dissertation Series"}

@TechReport{BRICS-DS-97-3,
  author = 	 "Husfeldt, Thore",
  title = 	 "Dynamic Computation",
  institution =  brics,
  year = 	 1997,
  type = 	 ds,
  number = 	 "DS-97-3",
  address = 	 daimi,
  month = 	 dec,
  note =	 "PhD thesis. 90~pp",
  abstract =	 "Chapter~1 contains a brief introduction to and
		  motivation of dynamic computations, and illustrates the
		  main computational models used throughout the thesis:
		  the random access machine and the {\em cell probe
		  model} introduced by Fredman.\bibpar

		  Chapter~2 paves the way for proving lower bounds for
		  several dynamic problems. In particular, the chapter
		  identifies a number of key problems which are hard for
		  dynamic computations, and to which many other dynamic
		  problems can be reduced. The main contribution of this
		  chapter can be summarised in two results. The first shows
		  that the signed prefix sum problem, which has already
		  been heavily exploited for proving lower bounds on
		  dynamic algorithms and data structures, remains hard even
		  when the query algorithm is provided with some amount of
		  nondeterminism. The second result concerns the amount of
		  extra information that can be provided to the query
		  algorithm without affecting the lower bound. Some
		  applications of these results are contained in this
		  chapter; in addition, the results are exploited heavily
		  in the lower bound proofs in the remainder of the
		  thesis.\bibpar

		  Chapter~3 investigates the dynamic complexity of the
		  symmetric Boolean functions, and provides upper and lower
		  bounds. These results establish links between parallel
		  complexity (namely, Boolean circuit complexity) and
		  dynamic complexity. In particular, it is shown that the
		  circuit depth of any symmetric function and the
		  complexity of the dynamic prefix problem for the same
		  function depend on the same combinatorial
		  properties. The connection between these
		  two different modes and models of computation is shown to
		  be very strong in that the trade-off between circuit size
		  and circuit depth is similar to the trade-off between
		  update and query time.\bibpar

		  Chapter~4 considers dynamic graph problems. In
		  particular, it presents the fastest known algorithm for
		  dynamic reachability on planar acyclic digraphs with one
		  source and one sink (known as planar {\em st-graphs}), a
		  problem for which only partial solutions were previously
		  known. In the second part of the chapter, the lower
		  bound techniques from Chapter~2 are further exploited to
		  yield new hardness results for a number of graph
		  problems, including reachability problems in planar
		  graphs and grid graphs, dynamic upward planarity testing
		  and monotone point location.\bibpar

		  Chapter~5 turns to strings, and focuses on the problem of
		  maintaining a string of parentheses, known as the dynamic
		  membership problem for the Dyck languages. Parentheses
		  are inserted and removed dynamically, while the algorithm
		  has to check whether the string is properly balanced. It
		  is shown that this problem can be solved in
		  polylogarithmic time per operation. The lower bound
		  techniques from the thesis are again used to prove the
		  hardness of this problem",
  linkhtmlabs =	 "",
  linkps =	 "",
  linkpdf =	 ""
}
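The dynamic Dyck membership problem of Chapter~5 above can be illustrated
with the textbook segment-tree technique: encode '(' as +1 and ')' as -1,
and store for every segment its total sum and its minimum prefix sum; the
string is balanced exactly when the whole string has sum zero and minimum
prefix sum at least zero. The Python sketch below illustrates this idea
only and is not the algorithm of the thesis: it assumes a fixed capacity
n (a power of two) and supports replacing a position rather than true
insertion and removal, taking O(log n) time per operation.

    class DyckChecker:
        def __init__(self, n):
            # n leaves (n a power of two); internal node i has
            # children 2i and 2i+1, the root is node 1.
            self.n = n
            self.total = [0] * (2 * n)   # sum of +1/-1 values in segment
            self.minpre = [0] * (2 * n)  # minimum prefix sum in segment

        def update(self, i, ch):
            """Set position i to '(', ')' or ' ' (empty)."""
            v = {"(": 1, ")": -1, " ": 0}[ch]
            i += self.n
            self.total[i] = v
            self.minpre[i] = min(v, 0)
            i //= 2
            while i >= 1:
                l, r = 2 * i, 2 * i + 1
                self.total[i] = self.total[l] + self.total[r]
                self.minpre[i] = min(self.minpre[l],
                                     self.total[l] + self.minpre[r])
                i //= 2

        def balanced(self):
            return self.total[1] == 0 and self.minpre[1] >= 0

    d = DyckChecker(8)
    for i, ch in enumerate("(())"):
        d.update(i, ch)
    print(d.balanced())   # True; empty positions count as 0
    d.update(3, "(")      # "(()(" is no longer balanced
    print(d.balanced())   # False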

@TechReport{BRICS-DS-97-2,
  author = 	 "{\O}rb{\ae}k, Peter",
  title = 	 "Trust and Dependence Analysis",
  institution =  brics,
  year = 	 1997,
  type = 	 ds,
  number = 	 "DS-97-2",
  address = 	 daimi,
  month = 	 jul,
  note =	 "PhD thesis. x+175~pp",
  abstract =	 "The two pillars of {\em trust analysis} and {\em
		  dependence algebra} form the foundation of this
		  thesis. Trust analysis is a static analysis of the
		  run-time trustworthiness of data. Dependence algebra is a
		  rich abstract model of data dependences in programming
		  languages, applicable to several kinds of
		  analyses.\bibpar

		  We have developed trust analyses for imperative languages
		  with pointers as well as for higher order functional
		  languages, utilizing abstract interpretation, constraint
		  generation and type inference.\bibpar

		  The mathematical theory of dependence algebra has been
		  developed and investigated. Two applications of
		  dependence algebra have been given: a practical
		  implementation of a trust analysis for the C programming
		  language, and a soft type inference system for action
		  semantic specifications. Soundness results for the two
		  analyses have been established",
  linkhtmlabs =	 "",
  linkps =	 "",
  linkpdf =	 ""
}
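To give a flavour of the trust analysis described above, the sketch below
runs a two-point abstract interpretation over the lattice trusted <
untrusted, joining the trust levels of the inputs of every assignment. It
is an illustration only: the toy program representation and all names are
hypothetical, and the analyses of the thesis (covering pointers and
higher-order functions) are considerably richer.

    TRUSTED, UNTRUSTED = 0, 1      # two-point lattice, TRUSTED below

    def join(a, b):
        return max(a, b)           # least upper bound on the lattice

    def analyse(program, env):
        """program: list of ('assign', target, sources) statements;
        env: initial trust level per variable.  Returns the final env."""
        for op, target, sources in program:
            assert op == "assign"
            level = TRUSTED        # a value is only as trustworthy
            for s in sources:      # as the values it is computed from
                level = join(level, env.get(s, UNTRUSTED))
            env[target] = level
        return env

    env = analyse(
        [("assign", "x", ["net_input"]),   # x := f(net_input)
         ("assign", "y", []),              # y := a constant
         ("assign", "z", ["x", "y"])],     # z := g(x, y)
        {"net_input": UNTRUSTED},
    )
    print(env["z"])   # 1, i.e. UNTRUSTED: z depends on network input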

@TechReport{BRICS-DS-97-1,
  author = 	 "Brodal, Gerth St{\o}lting",
  title = 	 "Worst Case Efficient Data Structures",
  institution =  brics,
  year = 	 1997,
  type = 	 ds,
  number = 	 "DS-97-1",
  address = 	 daimi,
  month = 	 jan,
  note =	 "PhD thesis. x+121~pp",
  abstract =	 "We study the design of efficient data structures where
		  each operation has a worst case efficient
		  implementation.  The concrete problems we consider are
		  {\em partial persistence}, implementation of {\em
		  priority queues}, and implementation of {\em
		  dictionaries}.\bibpar
		  
		  The {\em node copying\/} technique of Driscoll {\it et
		  al.}\ for making data structures of bounded in-degree
		  and out-degree partially persistent supports update
		  steps in amortized constant time and access steps in
		  worst case constant time.  We show how to extend the
		  technique such that update steps can be performed in
		  worst case constant time on the pointer machine
		  model.\bibpar

		  We present two comparison based priority queue
		  implementations.  The first implementation supports {\sc
		  FindMin}, {\sc Insert} and {\sc Meld} in worst case
		  constant time and {\sc Delete} and {\sc DeleteMin} in
		  worst case time $O(\log n)$.  The second implementation
		  achieves the same performance, but furthermore supports
		  {\sc DecreaseKey} in worst case constant time.  We show
		  that these time bounds are optimal for all
		  implementations supporting {\sc Meld} in worst case time
		  $o(n)$.  We show that any randomized implementation with
		  expected amortized cost $t$ comparisons per {\sc Insert}
		  and {\sc Delete} operation has expected cost at least
		  $n/{2^{O(t)}}$ comparisons for {\sc FindMin}.\bibpar

		  For the CREW PRAM we present time and work optimal
		  priority queues, supporting {\sc FindMin}, {\sc Insert},
		  {\sc Meld}, {\sc DeleteMin}, {\sc Delete} and {\sc
		  DecreaseKey} in constant time with $O(\log n)$
		  processors.  To be able to speed up Dijkstra's algorithm
		  for the single-source shortest path problem we present a
		  different parallel priority data structure yielding an
		  implementation of Dijkstra's algorithm which runs in
		  $O(n)$ time and performs $O(m\log n)$ work on a CREW
		  PRAM.\bibpar

		  On a unit cost RAM model with word size $w$ bits we
		  consider the maintenance of a set of $n$ integers from
		  $0..2^w-1$ under {\sc Insert}, {\sc Delete}, {\sc
		  FindMin}, {\sc FindMax} and {\sc Pred} (predecessor
		  query).  The RAM operations used are addition, left and
		  right bit shifts, and bit-wise boolean operations.  For
		  any function $f(n)$ satisfying $\log\log n\leq f(n)\leq
		  \sqrt{\log n}$, we support {\sc FindMin} and {\sc
		  FindMax} in constant time, {\sc Insert} and {\sc Delete}
		  in $O(f(n))$ time, and {\sc Pred} in $O((\log n)/f(n))$
		  time.\bibpar

		  The last problem we consider is the following: given a
		  set of $n$ binary strings of length $m$ each, answer
		  $d$--queries, {\it i.e.}, given a binary query string of
		  length $m$, report whether there exists a string in the
		  set within Hamming distance $d$ of the query string. We
		  present a data structure of size $O(nm)$ supporting
		  1--queries in time $O(m)$ and the reporting of all
		  strings within Hamming distance 1 of the query string in
		  time $O(m)$.  The data structure can be constructed in
		  time $O(nm)$ and supports the insertion of new strings
		  in amortized time $O(m)$",
  longabstract =	 "We study the design of efficient data structures.  In
particular we focus on the design of data structures
where each operation has a worst case efficient
implementation.  The concrete problems we consider are
{\em partial persistence}, implementation of {\em
priority queues}, and implementation of {\em
dictionaries}.
		  
The first problem we consider is how to make bounded in-degree and
out-degree data structures partially persistent, {\it i.e.}, how to
remember old versions of a data structure for later access.  A {\em node
  copying\/} technique of Driscoll {\it et al.}\ supports update steps in
amortized constant time and access steps in worst case constant time.
The worst case time for an update step can be linear in the size of the
structure.  We show how to extend the technique of Driscoll {\em et
  al.\/} such that update steps can be performed in worst case constant
time on the pointer machine model.

We present two new comparison based priority queue implementations, with
the following properties.  The first implementation supports the
operations {\sc FindMin}, {\sc Insert} and {\sc Meld} in worst case
constant time and {\sc Delete} and {\sc DeleteMin} in worst case time
$O(\log n)$.  The priority queues can be implemented on the pointer
machine and require linear space.  The second implementation achieves the
same worst case performance, but furthermore supports {\sc DecreaseKey}
in worst case constant time.  The space requirement is again linear, but
the implementation requires auxiliary arrays of size $O(\log n)$. Our
bounds match the best known amortized bounds (achieved by respectively
binomial queues and Fibonacci heaps). The data structures presented are
the first achieving these worst case bounds, in particular supporting
{\sc Meld} in worst case constant time.  We show that these time bounds
are optimal for all implementations supporting {\sc Meld} in worst case
time $o(n)$.  We also present a tradeoff between the update time and the
query time of comparison based priority queue implementations.  Finally
we show that any randomized implementation with expected amortized cost
$t$ comparisons per {\sc Insert} and {\sc Delete} operation has expected
cost at least $n/{2^{O(t)}}$ comparisons for {\sc FindMin}.

Next we consider how to implement priority queues on parallel (comparison
based) models.  We present time and work optimal priority queues for the
CREW PRAM, supporting {\sc FindMin}, {\sc Insert}, {\sc Meld}, {\sc
  DeleteMin}, {\sc Delete} and {\sc DecreaseKey} in constant time with
$O(\log n)$ processors.  Our implementation is the first supporting all
of the listed operations in constant time.  To be able to speed up
Dijkstra's algorithm for the single-source shortest path problem we
present a different parallel priority data structure. With this
specialized data structure we give a parallel implementation of
Dijkstra's algorithm which runs in $O(n)$ time and performs $O(m\log n)$
work on a CREW PRAM. This represents a logarithmic factor improvement for
the running time compared with previous approaches.

We also consider priority queues on a RAM model which is stronger than
the comparison model.  The specific problem is the maintenance of a set
of $n$ integers in the range $0..2^w-1$ under the operations {\sc
  Insert}, {\sc Delete}, {\sc FindMin}, {\sc FindMax} and {\sc Pred}
(predecessor query) on a unit cost RAM with word size $w$ bits.  The RAM
operations used are addition, left and right bit shifts, and bit-wise
boolean operations.  For any function $f(n)$ satisfying $\log\log n\leq
f(n)\leq \sqrt{\log n}$, we present a data structure supporting {\sc
  FindMin} and {\sc FindMax} in worst case constant time, {\sc Insert}
and {\sc Delete} in worst case $O(f(n))$ time, and {\sc Pred} in worst
case $O((\log n)/f(n))$ time.  This represents the first priority queue
implementation for a RAM which supports {\sc Insert}, {\sc Delete} and
{\sc FindMin} in worst case time $O(\log\log n)$ --- previous bounds were
only amortized.  The data structure is also the first dictionary
implementation for a RAM which supports {\sc Pred} in worst case $O(\log
n/\log\log n)$ time while having worst case $O(\log\log n)$ update time.
Previous sublogarithmic dictionary implementations do not provide for
updates that are significantly faster than queries. The best solutions
known support both updates and queries in worst case time $O(\sqrt{\log
  n})$.

The last problem we consider is the following dictionary problem over
binary strings.  Given a set of $n$ binary strings of length $m$ each, we
want to answer $d$--queries, {\it i.e.}, given a binary query string of
length $m$, to report whether there exists a string in the set within
Hamming distance $d$ of the query string. We present a data structure of
size $O(nm)$
supporting 1--queries in time $O(m)$ and the reporting of all strings
within Hamming distance 1 of the query string in time $O(m)$.  The data
structure can be constructed in time $O(nm)$.  The implementation
presented is the first achieving these optimal time bounds for the
preprocessing of the dictionary and for 1--queries.  The data structure
can be extended to support the insertion of new strings in amortized time
$O(m)$",
  linkhtmlabs =	 "",
  linkps =	 "",
  linkpdf =	 ""
}
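For contrast with the worst case bounds discussed in the priority queue
paragraphs above, the following sketch shows a classical meldable
priority queue, the skew heap, whose Meld runs in amortized (not worst
case) O(log n) time; the point of the thesis's constructions is to
support Meld in worst case constant time.  This illustrates the Meld
operation only and is not any data structure from the thesis.

    class Node:
        __slots__ = ("key", "left", "right")
        def __init__(self, key):
            self.key, self.left, self.right = key, None, None

    def meld(a, b):
        # Keep the smaller root, meld the other heap into its right
        # subtree, then swap the children (the "skew" step on which
        # the amortized analysis rests).
        if a is None:
            return b
        if b is None:
            return a
        if b.key < a.key:
            a, b = b, a
        a.right = meld(a.right, b)
        a.left, a.right = a.right, a.left
        return a

    def insert(h, key):
        return meld(h, Node(key))

    def find_min(h):
        return h.key

    def delete_min(h):
        return meld(h.left, h.right)

    h = None
    for k in [5, 1, 9]:
        h = insert(h, k)
    h = meld(h, insert(None, 3))
    print(find_min(h))    # 1
    h = delete_min(h)
    print(find_min(h))    # 3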
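The d-query problem in the final paragraphs above also admits a naive
baseline that makes the stated bounds concrete: keep the set in a hash
table and probe the query string itself plus each of its m single-bit
flips.  Since every probe hashes an m-bit string, a 1-query costs O(m^2)
expected time with O(nm) space; the trie-based structure of the thesis
answers 1-queries in worst case O(m).  The sketch below is this naive
baseline, not the structure from the thesis.

    def build(strings):
        return set(strings)        # O(nm) space

    def flip(s, i):
        return s[:i] + ("1" if s[i] == "0" else "0") + s[i + 1:]

    def one_query(table, q):
        """Return some string in the set within Hamming distance 1
        of q, or None if there is none."""
        if q in table:
            return q
        for i in range(len(q)):
            candidate = flip(q, i)
            if candidate in table:
                return candidate
        return None

    table = build(["0110", "1010", "1111"])
    print(one_query(table, "0100"))    # '0110' (distance 1)
    print(one_query(table, "0001"))    # None (nothing within 1)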