@string{brics = {{BRICS}}}
@string{daimi = {Department of Computer Science, University of Aarhus}}
@string{iesd  = {Department of Mathematics and Computer Science, Aalborg University}}
@string{rs    = {Research Series}}
@string{ns    = {Notes Series}}
@string{ls    = {Lecture Series}}
@string{ds    = {Dissertation Series}}
@techreport{BRICS-LS-96-6,
  author      = {Bra{\"u}ner, Torben},
  title       = {Introduction to Linear Logic},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-6},
  address     = daimi,
  month       = dec,
  note        = {viii+55~pp},
  abstract    = {The main concern of this report is to give an
    introduction to Linear Logic. For pedagogical purposes we
    shall also have a look at Classical Logic as well as
    Intuitionistic Logic. Linear Logic was introduced by
    J.-Y. Girard in 1987 and it has attracted much attention
    from computer scientists, as it is a logical way of
    coping with resources and resource control. The focus of
    this technical report will be on proof-theory and
    computational interpretation of proofs, that is, we will
    focus on the question of how to interpret proofs as
    programs and reduction (cut-elimination) as
    evaluation. We first introduce Classical Logic. This is
    the fundamental idea of the proofs-as-programs
    paradigm. Cut-elimination for Classical Logic is highly
    non-deterministic; it is shown how this can be remedied
    either by moving to Intuitionistic Logic or to Linear
    Logic. In the case of Linear Logic we consider
    Intuitionistic Linear Logic as well as Classical Linear
    Logic. Furthermore, we take a look at the Girard
    Translation translating Intuitionistic Logic into
    Intuitionistic Linear Logic. Also, we give a brief
    introduction to some concrete models of Intuitionistic
    Linear Logic. No proofs will be given except that a proof
    of cut-elimination for the multiplicative fragment of
    Classical Linear Logic is included in an appendix.
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1] Classical and Intuitionistic Logic
    \begin{itemize}
    \item[1.1] Classical Logic
    \item[1.2] Intuitionistic Logic
    \item[1.3] The $\lambda $-Calculus
    \item[1.4] The Curry-Howard Isomorphism
    \end{itemize}
    \item[2] Linear Logic
    \begin{itemize}
    \item[2.1] Classical Linear Logic
    \item[2.2] Intuitionistic Linear Logic
    \item[2.3] A Digression - Russell's Paradox and Linear
    Logic
    \item[2.4] The Linear $\lambda $-Calculus
    \item[2.5] The Curry-Howard Isomorphism
    \item[2.6] The Girard Translation
    \item[2.7] Concrete Models
    \end{itemize}
    \item[A] Logics
    \begin{itemize}
    \item[A.1] Classical Logic
    \item[A.2] Intuitionistic Logic
    \item[A.3] Classical Linear Logic
    \item[A.4] Intuitionistic Linear Logic
    \end{itemize}
    \item[B] Cut-Elimination for Classical Linear Logic
    \begin{itemize}
    \item[B.1]
    \item[B.2] Putting the Proof Together
    \end{itemize}
    \end{itemize}},
  linkhtmlabs = {},
  linkdvi     = {},
  linkps      = {},
  linkpdf     = {}
}
@techreport{BRICS-LS-96-5,
  author      = {Dubhashi, Devdatt P.},
  title       = {What Can't You Do With LP?},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-5},
  address     = daimi,
  month       = dec,
  note        = {viii+23~pp},
  abstract    = {These notes from the BRICS course ``Pearls of Theory''
    are an introduction to Linear Programming and its use in
    solving problems in Combinatorics and in the design and
    analysis of algorithms for combinatorial problems.
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1] Dr.\ Cheng's Diet and LP
    \item[2] LP versus NP: A Panacea?
    \item[3] Duality
    \item[4] Linear versus Integer
    \item[5] Luck: Unimodularity
    \item[6] Rounding
    \item[7] Randomised Rounding
    \item[8] Primal--Dual
    \item[9] If You Want to Prospect for more Pearls ...
    \item[10] Problems
    \end{itemize}},
  linkhtmlabs = {},
  linkps      = {},
  linkpdf     = {}
}
@techreport{BRICS-LS-96-4,
  author      = {Skyum, Sven},
  title       = {A Non-Linear Lower Bound for Monotone Circuit
                 Size},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-4},
  address     = daimi,
  month       = dec,
  note        = {viii+14~pp},
  abstract    = {In complexity theory we are faced with the
    frustrating situation that we are able to prove
    very few non-trivial lower bounds. In the area
    of combinatorial complexity theory which this
    note is about, the situation can be described
    quite precisely in the sense that although
    almost all functions are very complex (have
    exponential circuit size), no one has been able
    to prove any non-linear lower bounds for
    explicitly given functions.\bibpar
    One alley which is often taken is to restrict
    the models to make larger lower bounds more
    likely. Until 1985 no non-linear lower bounds
    were known for monotone circuit size either
    (the best lower bound was $4n$). In 1985,
    Razborov [1] proved a {\em super polynomial}
    lower bound for a specific family of Boolean
    functions.\bibpar
    Razborov proved the following:
    \begin{quote}
    Any family of monotone Boolean circuits
    accepting graphs containing cliques of size
    $\lfloor{n/2}\rfloor$ has super polynomial
    size when the graphs are represented by their
    incidence matrices.
    \end{quote}
    The main purpose of this note is to give a
    ``simple'' version of Razborov's proof. This
    leads to a weaker result but the proof contains
    all the ingredients of Razborov's proof.\bibpar
    We will prove:
    \begin{quote}
    Any family of monotone Boolean circuits
    accepting graphs containing cliques of size 3
    ({\em triangles}) has size $\Omega(n^3/\log
    ^4n)$.
    \end{quote}
    In Section 1 we introduce Boolean functions and
    the complexity model we are going to use,
    namely {\em Boolean circuits}. In Section 2 the
    appropriate definitions of graphs are given and
    some combinatorial properties about them are
    proven. Section 3 contains the proof of the
    main result.
    \begin{itemize}
    \item[\mbox{[1]}] A.~A. Razborov: Lower bounds
    on the monotone complexity of some Boolean
    functions, {\em Dokl. Akad. Nauk SSSR 281(4)
    (1985) 798--801 (In Russian); English
    translation in: Soviet Math. Dokl. 31 (1985)
    354--57}.
    \end{itemize}
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1]Boolean functions and circuits
    \item[2]Graphs and Combinatorial lemmas
    \item[3]Putting things together
    \item[4]Problems
    \end{itemize}},
  linkhtmlabs = {},
  linkdvi     = {},
  linkps      = {}
}
@techreport{BRICS-LS-96-3,
  author      = {Rose, Kristoffer H.},
  title       = {Explicit Substitution -- Tutorial \& Survey},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-3},
  address     = daimi,
  month       = sep,
  note        = {v+150~pp},
  abstract    = {These lecture notes are from the BRICS
    mini-course ``Explicit Substitution'' taught at
    University of Aarhus, October 27, 1996.\bibpar
    We give a coherent overview of the area of
    explicit substitution and some applications.
    The focus is on the {\em operational} or {\em
    syntactic} side of things, in particular we
    will not cover the areas of semantics and type
    systems for explicit substitution calculi.
    Emphasis is put on providing a universal
    understanding of the very different techniques
    used by various authors in the area.
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1]Explicit Substitution Rules
    \begin{itemize}
    \item[1.1]Explicit Substitution
    \item[1.2]Preservation of Strong
    Normalisation (PSN)
    \item[1.3]Discussion
    \end{itemize}
    \item[2]Explicit Binding
    \begin{itemize}
    \item[2.1]Namefree $\lambda$-Calculus
    \item[2.2]Explicit Binding Variations for
    Explicit Substitution
    \item[2.3]Discussion
    \end{itemize}
    \item[3]Explicit Addresses
    \begin{itemize}
    \item[3.1]$\lambda$-graphs and Explicit
    Sharing
    \item[3.2]Explicit Substitution \& Sharing
    \item[3.3]Discussion
    \end{itemize}
    \item[4]Higher-Order Rewriting and Functional
    Programs
    \begin{itemize}
    \item[4.1]Combinatory Reduction Systems (CRS)
    \item[4.2]Explicit Substitutes
    \item[4.3]Explicit Addresses
    \item[4.4]CRS and Functional Programs
    \item[4.5]Discussion
    \end{itemize}
    \item[5]Strategies and Abstract Machines
    \begin{itemize}
    \item[5.1]Strategies for $\lambda$-Calculus
    \item[5.2]Calculi for Weak Reduction with
    Sharing
    \item[5.3]Reduction Strategies
    \item[5.4]Constructors and Recursion
    \item[5.5]A Space Leak Free Calculus
    \item[5.6]Discussion
    \end{itemize}
    \item[A]Appendices: Preliminaries
    \begin{itemize}
    \item[A.1]Reductions
    \item[A.2]Inductive Notations
    \item[A.3]$\lambda$-Calculus
    \end{itemize}
    \end{itemize}},
  linkhtmlabs = {},
  linkps      = {}
}
@techreport{BRICS-LS-96-2,
  author      = {Albers, Susanne},
  title       = {Competitive Online Algorithms},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-2},
  address     = daimi,
  month       = sep,
  note        = {viii+57~pp},
  abstract    = {These lecture notes are from the mini-course
    ``Competitive Online Algorithms'' taught at
    Aarhus University, August 27--29, 1996.\bibpar
    The mini-course consisted of three lectures. In
    the first lecture we gave basic definitions and
    presented important techniques that are used in
    the study on online algorithms. The paging
    problem was always the running example to
    explain and illustrate the material. We also
    discussed the $k$-server problem, which is a
    very well-studied generalization of the paging
    problem.\bibpar
    The second lecture was concerned with
    self-organizing data structures, in particular
    self-organizing linear lists. We presented
    results on deterministic and randomized online
    algorithms. Furthermore, we showed that linear
    lists can be used to build very effective data
    compression schemes and reported on theoretical
    as well as experimental results.\bibpar
    In the third lecture we discussed three
    application areas in which interesting online
    problems arise. The areas were (1) distributed
    data management, (2) scheduling and load
    balancing, and (3) robot navigation and
    exploration. In each of these fields we gave
    some important results.
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1]Online algorithms and competitive
    analysis
    \begin{itemize}
    \item[1.1]Basic definitions
    \item[1.2]Results on deterministic paging
    algorithms
    \end{itemize}
    \item[2]Randomization in online algorithms
    \begin{itemize}
    \item[2.1]General concepts
    \item[2.2]Randomized paging algorithms
    against oblivious adversaries
    \end{itemize}
    \item[3]Proof techniques
    \begin{itemize}
    \item[3.1]Potential functions
    \item[3.2]Yao's minimax principle
    \end{itemize}
    \item[4]The $k$-server problem
    \item[5]The list update problem
    \begin{itemize}
    \item[5.1]Deterministic online algorithms
    \item[5.2]Randomized online algorithms
    \item[5.3]Average case analyses of list
    update algorithms
    \end{itemize}
    \item[6]Data compression based on linear lists
    \begin{itemize}
    \item[6.1]Theoretical results
    \item[6.2]Experimental results
    \item[6.3]The compression algorithm by
    Burrows and Wheeler
    \end{itemize}
    \item[7]Distributed data management
    \begin{itemize}
    \item[7.1]Formal definition of migration and
    replication problems
    \item[7.2]Page migration
    \item[7.3]Page replication
    \item[7.4]Page allocation
    \end{itemize}
    \item[8]Scheduling and load balancing
    \begin{itemize}
    \item[8.1]Scheduling
    \item[8.2]Load balancing
    \end{itemize}
    \item[9]Robot navigation and exploration
    \end{itemize}},
  linkhtmlabs = {},
  linkdvi     = {},
  linkps      = {}
}
@techreport{BRICS-LS-96-1,
  author      = {Arge, Lars},
  title       = {External-Memory Algorithms with Applications
                 in Geographic Information Systems},
  institution = brics,
  year        = 1996,
  type        = ls,
  number      = {LS-96-1},
  address     = daimi,
  month       = sep,
  note        = {viii+53~pp},
  abstract    = {In the design of algorithms for large-scale applications
    it is essential to consider the problem of minimizing
    Input/Output (I/O) communication. Geographical
    information systems (GIS) are good examples of such
    large-scale applications as they frequently handle huge
    amounts of spatial data. In this note we survey the
    recent developments in external-memory algorithms with
    applications in GIS. First we discuss the Aggarwal-Vitter
    I/O-model and illustrate why normal internal-memory
    algorithms for even very simple problems can perform
    terribly in an I/O-environment. Then we describe the
    fundamental paradigms for designing I/O-efficient
    algorithms by using them to design efficient sorting
    algorithms. We then go on and survey external-memory
    algorithms for computational geometry problems---with
    special emphasis on problems with applications in
    GIS---and techniques for designing such algorithms: Using
    the orthogonal line segment intersection problem we
    illustrate the {\em distribution-sweeping\/} and the {\em
    buffer tree\/} techniques which can be used to solve a
    large number of important problems. Using the batched
    range searching problem we introduce the {\em external
    segment tree\/}. We also discuss an algorithm for the
    red/blue line segment intersection problem---an important
    subproblem in map overlaying. In doing so we introduce
    the {\em batched filtering\/} and the {\em external
    fractional cascading\/} techniques. Finally, we shortly
    describe TPIE---a Transparent Parallel I/O Environment
    designed to allow programmers to write I/O-efficient
    programs.\bibpar
    These lecture notes were made for the CISM Advanced
    School on Algorithmic Foundations of Geographic
    Information Systems, September 16--20, 1996, Udine,
    Italy.
    \subsubsection*{Contents}
    \begin{itemize}
    \item[1] Introduction
    \begin{itemize}
    \item[1.1] The Parallel Disk Model
    \item[1.2] Outline
    \end{itemize}
    \item[2] RAM-Complexity and I/O-Complexity
    \begin{itemize}
    \item[2.1] Fundamental External-Memory Bounds
    \item[2.2] Summary
    \end{itemize}
    \item[3] Paradigms for Designing I/O-efficient Algorithms
    \begin{itemize}
    \item[3.1] Merge Sort
    \item[3.2] Distribution Sort
    \item[3.3] Buffer Tree Sort
    \item[3.4] Sorting on Parallel Disks
    \item[3.5] Summary
    \end{itemize}
    \item[4] External-Memory Computational Geometry Algorithms
    \begin{itemize}
    \item[4.1] The Orthogonal Line Segment Intersection Problem
    \item[4.2] The Batched Range Searching Problem
    \item[4.3] The Red/Blue Line Segment Intersection Problem
    \item[4.4] Other External-Memory Computational Geometry
    Algorithms
    \item[4.5] Summary
    \end{itemize}
    \item[5] TPIE --- A Transparent Parallel I/O Environment
    \item[6] Conclusions
    \end{itemize}},
  linkhtmlabs = {},
  linkpdf     = {},
  linkps      = {}
}