+ my $file = $cache_file_name;
+ open ($cache_fd, '+>>', $file) || error ("unable to write $file: $!");
+ flock ($cache_fd, LOCK_EX) || error ("unable to lock $file: $!");
+ seek ($cache_fd, 0, 0) || error ("unable to rewind $file: $!");
+
+ my $mtime = (stat($cache_fd))[9];
+
+ if ($mtime + $cache_max_age < time) {
+ print STDERR "$progname: cache is too old\n" if ($verbose);
+ return ();
+ }
+
+ my $odir = <$cache_fd>;
+ $odir =~ s/[\r\n]+$//s if defined ($odir);
+ if (!defined ($odir) || ($dir ne $odir)) {
+ print STDERR "$progname: cache is for $odir, not $dir\n"
+ if ($verbose && $odir);
+ return ();
+ }
+
+ my @files = ();
+ while (<$cache_fd>) {
+ s/[\r\n]+$//s;
+ push @files, "$odir/$_";
+ }
+
+ print STDERR "$progname: " . ($#files+1) . " files in cache\n"
+ if ($verbose);
+
+ $read_cache_p = 1;
+ return @files;
+}
+
+
+# Finish with the image-list cache file opened earlier by read_cache().
+# If the cache was successfully read this run ($read_cache_p), its contents
+# are current, so just unlock and close it.  Otherwise rewrite it from
+# @all_files before closing.  Cache format: line 1 is $dir; each following
+# line is a file name with the leading "$dir/" stripped.
+#
+sub write_cache($) {
+  my ($dir) = @_;
+
+  return unless ($cache_p);
+
+  # If we read the cache, just close it without rewriting it.
+  # If we didn't read it, then write it now.
+
+  if (! $read_cache_p) {
+
+    # Rewind and empty the file before writing the fresh listing.
+    truncate ($cache_fd, 0) ||
+      error ("unable to truncate $cache_file_name: $!");
+    seek ($cache_fd, 0, 0) ||
+      error ("unable to rewind $cache_file_name: $!");
+
+    if ($#all_files >= 0) {
+      print $cache_fd "$dir\n";
+      foreach (@all_files) {
+        my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
+        # NOTE(review): the /o flag compiles this pattern only once per
+        # process; fine so long as write_cache is only ever called with a
+        # single $dir per run -- confirm.
+        $f =~ s@^\Q$dir/@@so || die; # remove $dir from front
+        print $cache_fd "$f\n";
+      }
+    }
+
+    print STDERR "$progname: cached " . ($#all_files+1) . " files\n"
+      if ($verbose);
+  }
+
+  # Release the exclusive lock taken when the cache was opened.
+  flock ($cache_fd, LOCK_UN) ||
+    error ("unable to unlock $cache_file_name: $!");
+  close ($cache_fd);
+  $cache_fd = undef;
+}
+
+
+# Expand the few HTML character entities that actually occur in RSS
+# (&amp; &lt; &gt; &quot; &apos;) plus numeric "&#NNN;" and "&#xHHH;"
+# forms.  Unrecognized entities are passed through unchanged.
+#
+sub html_unquote($) {
+  my ($h) = @_;
+
+  # This only needs to handle entities that occur in RSS, not full HTML.
+  my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
+              'quot' => '"', 'apos' => "'" );
+  $h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+    {
+     my ($o, $c) = ($1, $3);
+     if (! defined($2)) {
+       $c = $ent{$c};			# named, e.g. "&lt;"
+     } else {
+       if ($c =~ m@^x([\dA-F]+)$@si) {	# hex, e.g. "&#x41;"
+         $c = chr(hex($1));
+       } elsif ($c =~ m@^\d+$@si) {	# decimal, e.g. "&#65;"
+         $c = chr($c);
+       } else {
+         $c = undef;			# unknown: keep original text
+       }
+     }
+     # Test definedness, not truth: "&#48;" decodes to the string "0",
+     # which is false, and the old "($c || $o)" wrongly left it unexpanded.
+     (defined($c) ? $c : $o);
+    }
+   /gexi;
+  return $h;
+}
+
+
+
+# Point $ua at an HTTP proxy.  When no proxy is named in the environment,
+# consult the MacOS system settings via scutil(8) and export what we find
+# as $ENV{http_proxy}; then let LWP pick up the environment as usual.
+#
+sub set_proxy($) {
+  my ($ua) = @_;
+
+  unless (defined ($ENV{http_proxy}) || defined ($ENV{HTTP_PROXY})) {
+    my $scutil_out = `scutil --proxy 2>/dev/null`;
+    my ($server) = ($scutil_out =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+    my ($port)   = ($scutil_out =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+    if ($server) {
+      # Note: this ignores the "ExceptionsList".
+      my $proxy_url = "http://" . $server;
+      $proxy_url .= ":$port" if ($port);
+      $proxy_url .= "/";
+      $ENV{http_proxy} = $proxy_url;
+      print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n"
+        if ($verbose > 2);
+    }
+  }
+
+  $ua->env_proxy();
+}
+
+
+# Sanity-check that LWP::Simple loaded usably, and hook up any proxy.
+#
+sub init_lwp() {
+  error ("\n\n\tPerl is broken. Do this to repair it:\n" .
+         "\n\tsudo cpan LWP::Simple\n")
+    unless defined ($LWP::Simple::ua);
+  set_proxy ($LWP::Simple::ua);
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
+# If $url turns out to be an HTML page instead of a feed, follow its
+# <link rel="alternate"> autodiscovery tag and recurse on that feed.
+#
+# Forward declaration: parse_feed() calls itself for autodiscovery.
+sub parse_feed($);
+sub parse_feed($) {
+  my ($url) = @_;
+
+  init_lwp();
+  $LWP::Simple::ua->agent ("$progname/$version");
+  $LWP::Simple::ua->timeout (10); # bail sooner than the default of 3 minutes
+
+  # Treat a failed fetch as an empty document rather than dying here;
+  # the feed-vs-HTML checks below produce the error message.
+  my $body = (LWP::Simple::get($url) || '');
+
+  if ($body !~ m@^<\?xml\s@si) {
+    # Not an RSS/Atom feed. Try RSS autodiscovery.
+
+    # (Great news, everybody: Flickr no longer provides RSS for "Sets",
+    # only for "Photostreams", and only the first 20 images of those.
+    # Thanks, assholes.)
+
+    error ("not an RSS or Atom feed, or HTML: $url")
+      unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
+
+    # Find the first <link> with RSS or Atom in it, and use that instead.
+    # The s///gse loop is used only to visit each <LINK> tag; note that
+    # the "return" inside the /e replacement block returns from
+    # parse_feed itself with the recursive result.
+
+    $body =~ s@<LINK\s+([^<>]*)>@{
+      my $p = $1;
+      if ($p =~ m! \b REL \s* = \s* ['"]? alternate \b!six &&
+          $p =~ m! \b TYPE \s* = \s* ['"]? application/(atom|rss) !six &&
+          $p =~ m! \b HREF \s* = \s* ['"] ( [^<>'"]+ ) !six
+         ) {
+        my $u2 = html_unquote ($1);
+        if ($u2 =~ m!^/!s) {
+          # Root-relative URL: prepend the scheme and host of the page.
+          my ($h) = ($url =~ m!^([a-z]+://[^/]+)!si);
+          $u2 = "$h$u2";
+        }
+        print STDERR "$progname: found feed: $u2\n"
+          if ($verbose);
+        return parse_feed ($u2);
+      }
+      '';
+    }@gsexi;
+
+    error ("no RSS or Atom feed for HTML page: $url");
+  }
+
+
+  # Split the document into chunks, one per Atom <entry> or RSS <item>,
+  # using \001 as a sentinel; drop the preamble before the first item.
+  $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+  my @items = split(/\001/, $body);
+  shift @items;
+
+  my @imgs = ();
+  my %ids;	# unique ID -> image URL, for duplicate detection
+
+  foreach my $item (@items) {
+    my $iurl = undef;
+    my $id = undef;
+
+    # Each s///gse scan below leaves $item unchanged (the replacement
+    # puts the matched text back); they exist only to set $iurl as a
+    # side effect of visiting each tag.
+
+    # First look for <link rel="enclosure" href="...">
+    #
+    if (! $iurl) {
+      $item =~ s!(<LINK[^<>]*>)!{
+        my $link = $1;
+        my ($rel) = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+
+        if ($rel && lc($rel) eq 'enclosure') {
+          if ($type) {
+            $href = undef unless ($type =~ m@^image/@si); # omit videos
+          }
+          $iurl = html_unquote($href) if $href;
+        }
+        $link;
+      }!gsexi;
+    }
+
+    # Then look for <media:content url="...">
+    #
+    if (! $iurl) {
+      $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+        my $link = $1;
+        my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+        $iurl = html_unquote($href) if $href;
+        $link;
+      }!gsexi;
+    }
+
+    # Then look for <enclosure url="..."/>
+    #
+    if (! $iurl) {
+      $item =~ s!(<ENCLOSURE[^<>]*>)!{
+        my $link = $1;
+        my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+        $iurl = html_unquote($href)
+          if ($href && $type && $type =~ m@^image/@si);  # omit videos
+        $link;
+      }!gsexi;
+    }
+
+    # Ok, maybe there's an image in the <url> field?
+    #
+    if (! $iurl) {
+      $item =~ s!((<URL\b[^<>]*>)([^<>]*))!{
+        my ($all, $u2) = ($1, $3);
+        $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
+        $all;
+      }!gsexi;
+    }
+
+    # Then look for <description>... with an <img src="..."> inside.
+    #
+    if (! $iurl) {
+      $item =~ s!(<description[^<>]*>.*?</description>)!{
+        my $desc = $1;
+        $desc = html_unquote($desc);
+        my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+        $iurl = $href if ($href);
+        $desc;
+      }!gsexi;
+    }
+
+    # Could also do <content:encoded>, but the above probably covers all
+    # of the real-world possibilities.
+
+
+    # Find a unique ID for this image, to defeat image farms.
+    # First look for <id>...</id>
+    ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+    # Then look for <guid isPermaLink=...> ... </guid>
+    ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+    # Then look for <link> ... </link>
+    ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+
+    # Record the image unless this ID was already seen; fall back to the
+    # image URL itself as the ID when the item had none.
+    if ($iurl) {
+      $id = $iurl unless $id;
+      my $o = $ids{$id};
+      if (! $o) {
+        $ids{$id} = $iurl;
+        my @P = ($iurl, $id);
+        push @imgs, \@P;
+      } elsif ($iurl ne $o) {
+        print STDERR "$progname: WARNING: dup ID \"$id\"" .
+                     " for \"$o\" and \"$iurl\"\n";
+      }
+    }
+  }
+
+  return @imgs;
+}
+
+
+# Like md5_base64, but remaps "/" and "+" (the two base64 characters
+# that are awkward in file names) so the digest can name a file.
+#
+sub md5_file($) {
+  my ($str) = @_;
+  my $digest = md5_base64 ($str);
+  $digest =~ tr@/+@_-@;		# "/" -> "_",  "+" -> "-"
+  return $digest;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+# $url: image URL from the feed; $uid: the item's unique ID (guid),
+# hashed to produce a stable local file name; $dir: feed cache directory.
+# Returns the bare file name, or undef if the URL was skipped.
+#
+sub download_image($$$) {
+  my ($url, $uid, $dir) = @_;
+
+  # Strip "#fragment" and "?query" so the extension test sees the
+  # actual file name at the end of the path.
+  my $url2 = $url;
+  $url2 =~ s/\#.*$//s; # Omit search terms after file extension
+  $url2 =~ s/\?.*$//s;
+  my ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si);
+
+  # If the feed hasn't put a sane extension on their URLs, nothing's going
+  # to work. This code assumes that file names have extensions, even the
+  # ones in the cache directory.
+  #
+  if (! $ext) {
+    print STDERR "$progname: skipping extensionless URL: $url\n"
+      if ($verbose > 1);
+    return undef;
+  }
+
+  # Don't bother downloading files that we will reject anyway.
+  #
+  if (! ($url2 =~ m/$good_file_re/io)) {
+    print STDERR "$progname: skipping non-image URL: $url\n"
+      if ($verbose > 1);
+    return undef;
+  }
+
+  # Local file name is the MD5 of the guid, plus the URL's extension.
+  my $file = md5_file ($uid);
+  $file .= '.' . lc($ext) if $ext;
+
+  # Don't bother doing If-Modified-Since to see if the URL has changed.
+  # If we have already downloaded it, assume it's good.
+  if (-f "$dir/$file") {
+    print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+      if ($verbose > 1);
+    return $file;
+  }
+
+  # Special-case kludge for Flickr:
+  # Their RSS feeds sometimes include only the small versions of the images.
+  # So if the URL ends in one of the "small-size" letters, change it to "b".
+  #
+  #     _o  orig,  1600 +
+  #     _k  large, 2048 max
+  #     _h  large, 1600 max
+  #     _b  large, 1024 max
+  #     _c  medium, 800 max
+  #     _z  medium, 640 max
+  #     ""  medium, 500 max
+  #     _n  small,  320 max
+  #     _m  small,  240 max
+  #     _t  thumb,  100 max
+  #     _q  square, 150x150
+  #     _s  square, 75x75
+  #
+  $url =~ s@_[sqtmnzc](\.[a-z]+)$@_b$1@si
+    if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+  print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+    if ($verbose > 1);
+  init_lwp();
+  $LWP::Simple::ua->agent ("$progname/$version");
+  my $status = LWP::Simple::mirror ($url, "$dir/$file");
+  if (!LWP::Simple::is_success ($status)) {
+    # NOTE(review): on failure we still return $file below, so the caller
+    # records an entry that may not exist on disk -- confirm intended.
+    print STDERR "$progname: error $status: $url\n"; # keep going
+  }
+
+  return $file;
+}
+
+
+# Given a feed URL (or a plain local directory), make sure a local cache
+# directory exists containing the feed's current images, re-polling the
+# feed when the cache is older than $feed_max_age.  Returns ($url, $dir):
+# $url is undef when the argument was already a local directory.
+#
+sub mirror_feed($) {
+  my ($url) = @_;
+
+  if ($url !~ m/^https?:/si) { # not a URL: local directory.
+    return (undef, $url);
+  }
+
+  # Pick a per-platform cache root, creating intermediate directories
+  # as needed.
+  my $dir = "$ENV{HOME}/Library/Caches"; # MacOS location
+  if (-d $dir) {
+    $dir = "$dir/org.jwz.xscreensaver.feeds";
+  } elsif (-d "$ENV{HOME}/.cache") { # Gnome "FreeDesktop XDG" location
+    $dir = "$ENV{HOME}/.cache/xscreensaver";
+    if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+    $dir .= "/feeds";
+    if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+  } elsif (-d "$ENV{HOME}/tmp") { # If ~/.tmp/ exists, use it.
+    $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+  } else {
+    $dir = "$ENV{HOME}/.xscreensaver-feeds";
+  }
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+  }
+
+  # MD5 for directory name to use for cache of a feed URL.
+  $dir .= '/' . md5_file ($url);
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+  }
+
+  # At this point, we have the directory corresponding to this URL.
+  # Now check to see if the files in it are up to date, and download
+  # them if not.
+
+  # The ".timestamp" file does double duty: its mtime records when the
+  # feed was last polled, and it is the flock() target that serializes
+  # access to this cache directory across processes.
+  my $stamp = '.timestamp';
+  my $lock = "$dir/$stamp";
+
+  print STDERR "$progname: awaiting lock: $lock\n"
+    if ($verbose > 1);
+
+  # NOTE(review): $mtime is read before the lock is acquired; if another
+  # process refreshes the feed while we wait, we may poll again
+  # immediately afterwards -- presumably harmless, confirm.
+  my $mtime = ((stat($lock))[9]) || 0;
+
+  my $lock_fd;
+  open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+  flock ($lock_fd, LOCK_EX) || error ("unable to lock $lock: $!");
+  seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+
+  my $poll_p = ($mtime + $feed_max_age < time);
+
+  # --no-cache cmd line arg means poll again right now.
+  $poll_p = 1 unless ($cache_p);
+
+  # Even if the cache is young, make sure there is at least one file,
+  # and re-check if not.
+  #
+  if (! $poll_p) {
+    my $count = 0;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f =~ m/^\./s);   # dot files (including .timestamp) don't count
+      $count++;
+      last;
+    }
+    closedir $dirh;
+
+    if ($count <= 0) {
+      print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+      $poll_p = 1;
+    }
+  }
+
+  if ($poll_p) {
+
+    print STDERR "$progname: loading $url\n" if ($verbose);
+
+    # Inventory the existing files; value 1 means "keep", 0 means
+    # "stale, delete unless re-seen in the feed".
+    my %files;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f eq '.' || $f eq '..');
+      $files{$f} = 0; # 0 means "file exists, should be deleted"
+    }
+    closedir $dirh;
+
+    # Never delete the lock/timestamp file itself.
+    $files{$stamp} = 1;
+
+    # Download each image currently in the feed.
+    #
+    my $count = 0;
+    my @urls = parse_feed ($url);
+    print STDERR "$progname: " . ($#urls + 1) . " images\n"
+      if ($verbose > 1);
+    foreach my $p (@urls) {
+      my ($furl, $id) = @$p;
+      my $f = download_image ($furl, $id, $dir);
+      next unless $f;
+      $files{$f} = 1; # Got it, don't delete
+      $count++;
+    }
+
+    my $empty_p = ($count <= 0);
+
+    # Now delete any files that are no longer in the feed.
+    # But if there was nothing in the feed (network failure?)
+    # then don't blow away the old files.
+    #
+    my $kept = 0;
+    foreach my $f (keys(%files)) {
+      if ($count <= 0) {
+        $kept++;
+      } elsif ($files{$f}) {
+        $kept++;
+      } else {
+        if (unlink ("$dir/$f")) {
+          print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+        } else {
+          print STDERR "$progname: rm $dir/$f: $!\n"; # don't bail
+        }
+      }
+    }
+
+    # Both feed and cache are empty. No files at all. Bail.
+    # ($kept counts the .timestamp entry too, hence <= 1, not <= 0.)
+    error ("empty feed: $url") if ($kept <= 1);
+
+    # Feed is empty, but we have some files from last time. Warn.
+    print STDERR "$progname: empty feed: using cache: $url\n"
+      if ($empty_p);
+
+    $mtime = time(); # update the timestamp
+
+  } else {
+
+    # Not yet time to re-check the URL.
+    print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+  }
+
+  # Unlock and update the write date on the .timestamp file.
+  # (If we did not poll, this re-applies the old $mtime unchanged.)
+  #
+  truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+  seek ($lock_fd, 0, 0) || error ("unable to rewind $lock: $!");
+  utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+  flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+  close ($lock_fd);
+  $lock_fd = undef;
+  print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+  # Don't bother using the imageDirectory cache. We know that this directory
+  # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+  # like ~/Pictures/ might.
+  #
+  $cache_p = 0;
+
+  # Return the URL and directory name of the files of that URL's local cache.
+  #
+  return ($url, $dir);
+}
+
+
+sub find_random_file($) {
+ my ($dir) = @_;
+
+ if ($use_spotlight_p == -1) {
+ $use_spotlight_p = 0;
+ if (-x '/usr/bin/mdfind') {
+ $use_spotlight_p = 1;
+ }
+ }
+
+ my $url;
+ ($url, $dir) = mirror_feed ($dir);
+
+ if ($url) {
+ $use_spotlight_p = 0;
+ print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+ }
+
+ @all_files = read_cache ($dir);
+
+ if ($#all_files >= 0) {
+ # got it from the cache...
+
+ } elsif ($use_spotlight_p) {
+ print STDERR "$progname: spotlighting $dir...\n" if ($verbose);
+ spotlight_all_files ($dir);
+ print STDERR "$progname: found " . ($#all_files+1) .
+ " file" . ($#all_files == 0 ? "" : "s") .
+ " via Spotlight\n"
+ if ($verbose);
+ } else {
+ print STDERR "$progname: recursively reading $dir...\n" if ($verbose);
+ find_all_files ($dir);
+ print STDERR "$progname: " .
+ "f=" . ($#all_files+1) . "; " .
+ "d=$dir_count; " .
+ "s=$stat_count; " .
+ "skip=${skip_count_unstat}+$skip_count_stat=" .
+ ($skip_count_unstat + $skip_count_stat) .
+ ".\n"
+ if ($verbose);
+ }
+
+ write_cache ($dir);